diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 9edd734e..fa9dcbbf 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -16,7 +16,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16
+        go-version: 1.17

     - name: Build
       run: make build
diff --git a/.gitignore b/.gitignore
index d1bd4d2e..82b2864b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,6 +39,8 @@ config.yaml
 config_dev.yaml
 tmp
 init_test.sh
+build.yml
 .Archive/
 .md_configs.data
 .me_configs.data
+.codecc
diff --git a/.golangci.yaml b/.golangci.yaml
index a78127b5..7a7a074e 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -28,14 +28,7 @@ linters:
     - bodyclose
     - gocritic
     - gocyclo
-    - misspell
-    - prealloc
-    - unparam
-    - wastedassign
     - whitespace
-
-    # - nlreturn
-    # - ifshort
     # - nestif
     # - gofumpt
     # - godox
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..fc44f4c6
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,19 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: local
+  hooks:
+    - id: golangci-lint
+      name: golangci-lint
+      description: Fast linters runner for Go.
+      entry: make lint
+      types: [go]
+      language: golang
+      pass_filenames: false
+    - id: golang-unittest
+      name: golang-unittest
+      description: Golang unittest.
+      entry: make test
+      types: [go]
+      language: golang
+      pass_filenames: false
diff --git a/Makefile b/Makefile
index 055fe2a7..8931883e 100644
--- a/Makefile
+++ b/Makefile
@@ -8,9 +8,11 @@ init:
 	# go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
 	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.43.0
 	# for make doc
-	go get -u github.com/swaggo/swag/cmd/swag@v1.6.7
+	go install github.com/swaggo/swag/cmd/swag@v1.7.6
 	# for make mock
 	go install github.com/golang/mock/mockgen@v1.4.4
+	# for ginkgo
+	go install github.com/onsi/ginkgo/v2/ginkgo@latest

 dep:
 	go mod tidy
@@ -37,7 +39,12 @@ lint-dupl:
 	golangci-lint run --no-config --disable-all --enable=dupl

 test:
+# Apple Silicon
+ifeq ("$(shell go env GOOS)-$(shell go env GOARCH)","darwin-arm64")
+	GOARCH=amd64 go test -mod=vendor -gcflags=all=-l $(shell go list ./... | grep -v mock | grep -v docs) -covermode=count -coverprofile .coverage.cov
+else
 	go test -mod=vendor -gcflags=all=-l $(shell go list ./... | grep -v mock | grep -v docs) -covermode=count -coverprofile .coverage.cov
+endif

 cov:
 	go tool cover -html=.coverage.cov
diff --git a/VERSION b/VERSION
index 158c7472..587c5f0c 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.9.5
+1.10.3
diff --git a/build/support-files/sql/0017_iam_20211116-1713_mysql.sql b/build/support-files/sql/0017_iam_20211116-1713_mysql.sql
new file mode 100644
index 00000000..5e01acf3
--- /dev/null
+++ b/build/support-files/sql/0017_iam_20211116-1713_mysql.sql
@@ -0,0 +1 @@
+ALTER TABLE `bkiam`.`saas_action` ADD COLUMN `related_environments` text NOT NULL;
diff --git a/build/support-files/sql/0018_iam_20220111-1708_mysql.sql b/build/support-files/sql/0018_iam_20220111-1708_mysql.sql
new file mode 100644
index 00000000..172b5c95
--- /dev/null
+++ b/build/support-files/sql/0018_iam_20220111-1708_mysql.sql
@@ -0,0 +1 @@
+ALTER TABLE `bkiam`.`expression` MODIFY COLUMN `type` SMALLINT(5) signed NOT NULL DEFAULT 0;
diff --git a/cmd/iam.go b/cmd/iam.go
index 1a73aff6..14adccce 100644
--- a/cmd/iam.go
+++ b/cmd/iam.go
@@ -29,6 +29,9 @@ import (
 	// init debug entry pool
 	_ "iam/pkg/logging/debug"

+	// init the pdp
+	_ "iam/pkg/abac/pdp/evalctx"
+
 	"iam/pkg/server"
 )

@@ -90,6 +93,7 @@ func Start() {
 	// NOTE: should be after initRedis
 	initCaches()
 	initPolicyCacheSettings()
+	initVerifyAppCodeAppSecret()
 	initSuperAppCode()
 	initSuperUser()
 	initSupportShieldFeatures()
diff --git a/cmd/init.go b/cmd/init.go
index 33c5f109..5244f035 100644
--- a/cmd/init.go
+++ b/cmd/init.go
@@ -135,6 +135,10 @@ func initPolicyCacheSettings() {
 	cacheimpls.InitPolicyCacheSettings(globalConfig.PolicyCache.Disabled, globalConfig.PolicyCache.ExpirationDays)
 }

+func initVerifyAppCodeAppSecret() {
+	cacheimpls.InitVerifyAppCodeAppSecret(globalConfig.EnableBkAuth)
+}
+
 func initSuperAppCode() {
 	config.InitSuperAppCode(globalConfig.SuperAppCode)
 }
diff --git a/docs/quick_start/develop.md b/docs/quick_start/develop.md
index b2fd5636..e529f451 100644
--- a/docs/quick_start/develop.md
+++ b/docs/quick_start/develop.md
@@ -1,13 +1,13 @@
 # 本地开发环境搭建

-## 1. 安装 go1.16 或更高的版本
+## 1. 安装 go1.17 或更高的版本

 [Go: Download and install](https://golang.org/doc/install)

 ```shell
 $ go version
-go version go1.16.3 darwin/amd64
+go version go1.17.5 darwin/amd64
 ```

 ## 2. 初始化表结构
@@ -88,4 +88,4 @@ $ make test # 执行单元测试
 $ make build # 编译
 $ make build-linux # 交叉编译GOOS=linux GOARCH=amd64
 $ make serve # 编译并启动
-```
\ No newline at end of file
+```
diff --git a/go.mod b/go.mod
index f9c44e3f..fc0be8df 100644
--- a/go.mod
+++ b/go.mod
@@ -1,62 +1,113 @@
 module iam

-go 1.16
+go 1.17

 require (
 	github.com/DATA-DOG/go-sqlmock v1.5.0
 	github.com/TencentBlueKing/gopkg v1.0.7
-	github.com/TencentBlueKing/iam-go-sdk v0.0.5
-	github.com/agiledragon/gomonkey v2.0.2+incompatible
+	github.com/TencentBlueKing/iam-go-sdk v0.0.8
+	github.com/agiledragon/gomonkey/v2 v2.3.1
 	github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751
-	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
-	github.com/alicebob/miniredis v2.5.0+incompatible
+	github.com/alicebob/miniredis/v2 v2.17.0
 	github.com/dlmiddlecote/sqlstats v1.0.2
 	github.com/fatih/structs v1.1.0
 	github.com/getsentry/sentry-go v0.11.0
-	github.com/gin-contrib/gzip v0.0.2 // indirect
-	github.com/gin-gonic/gin v1.7.2
-	github.com/go-errors/errors v1.1.1 // indirect
-	github.com/go-openapi/spec v0.20.3 // indirect
-	github.com/go-openapi/swag v0.19.15 // indirect
-	github.com/go-playground/validator/v10 v10.6.1
+	github.com/gin-gonic/gin v1.7.7
+	github.com/go-playground/validator/v10 v10.9.0
 	github.com/go-redis/cache/v8 v8.4.1
 	github.com/go-redis/redis/v8 v8.10.0
 	github.com/go-sql-driver/mysql v1.6.0
-	github.com/gofrs/uuid v4.0.0+incompatible
-	github.com/golang-jwt/jwt/v4 v4.0.0
+	github.com/gofrs/uuid v4.2.0+incompatible
+	github.com/golang-jwt/jwt/v4 v4.2.0
 	github.com/golang/mock v1.6.0
-	github.com/gomodule/redigo v1.8.2 // indirect
-	github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
 	github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
 	github.com/jmoiron/sqlx v1.2.0
-	github.com/json-iterator/go v1.1.11
-	github.com/leodido/go-urn v1.2.1 // indirect
-	github.com/lib/pq v1.8.0 // indirect
-	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/mattn/go-isatty v0.0.13 // indirect
-	github.com/onsi/ginkgo v1.16.5
+	github.com/json-iterator/go v1.1.12
+	github.com/onsi/ginkgo/v2 v2.0.0
 	github.com/onsi/gomega v1.17.0
 	github.com/parnurzeal/gorequest v0.2.16
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/prometheus/client_golang v1.11.0
 	github.com/sirupsen/logrus v1.8.1
-	github.com/smartystreets/assertions v1.1.1 // indirect
-	github.com/spf13/cobra v1.1.3
-	github.com/spf13/viper v1.8.0
+	github.com/spf13/cobra v1.3.0
+	github.com/spf13/viper v1.10.1
 	github.com/steinfletcher/apitest v1.5.11
 	github.com/stretchr/testify v1.7.0
 	github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14
-	github.com/swaggo/gin-swagger v1.3.0
-	github.com/swaggo/swag v1.6.7
-	github.com/ugorji/go v1.2.6 // indirect
+	github.com/swaggo/gin-swagger v1.3.3
+	github.com/swaggo/swag v1.7.6
 	github.com/vmihailenco/msgpack/v5 v5.3.4
-	github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect
-	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/automaxprocs v1.4.0
 	go.uber.org/multierr v1.7.0
-	go.uber.org/zap v1.18.1
-	golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
+	go.uber.org/zap v1.19.1
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0
 	moul.io/http2curl v1.0.0
 )
+
+require (
+	github.com/KyleBanks/depth v1.2.1 // indirect
+	github.com/PuerkitoBio/purell v1.1.1 // indirect
+	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 //
indirect + github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-errors/errors v1.1.1 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.6 // indirect + github.com/go-openapi/spec v0.20.4 // indirect + github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/klauspost/compress v1.12.2 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/lib/pq v1.8.0 // indirect + github.com/magiconair/properties v1.8.5 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.26.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/smartystreets/assertions v1.1.1 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/ugorji/go/codec v1.2.6 // indirect + github.com/vmihailenco/bufpool v0.1.11 // indirect + github.com/vmihailenco/go-tinylfu v0.2.0 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect + go.opentelemetry.io/otel v0.20.0 // indirect + go.opentelemetry.io/otel/metric v0.20.0 // indirect + go.opentelemetry.io/otel/trace v0.20.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect + golang.org/x/exp v0.0.0-20201221025956-e89b829e73ea // indirect + golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.1.8 // indirect + google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/go.sum b/go.sum index 05277db2..fb7b4b44 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,15 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod 
h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -26,7 +35,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -46,12 +55,12 @@ github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3 github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= @@ -61,12 +70,12 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= 
github.com/TencentBlueKing/gopkg v1.0.7 h1:Kp/Qt8UtVb1LTr6ZSPXWTGS8KlHhWMqH4JZh7Jguu9E= github.com/TencentBlueKing/gopkg v1.0.7/go.mod h1:oopRfRIMuG0IeZfNR99olKNx7976tYrRM1E1CY2y31M= -github.com/TencentBlueKing/iam-go-sdk v0.0.5 h1:hliT6K1cwOFJNkzlGb5uBeAnUkxyBWzof6eD3UQVm0I= -github.com/TencentBlueKing/iam-go-sdk v0.0.5/go.mod h1:5Z5YMclIZBokfgT9dTJFUcywygZXY9nhloX8rj+zFEc= +github.com/TencentBlueKing/iam-go-sdk v0.0.8 h1:7EkG18XDXSL88HaLr3V+V2Nw25Ndt4yH4xDrqwmapy8= +github.com/TencentBlueKing/iam-go-sdk v0.0.8/go.mod h1:5Z5YMclIZBokfgT9dTJFUcywygZXY9nhloX8rj+zFEc= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agiledragon/gomonkey v2.0.2+incompatible h1:eXKi9/piiC3cjJD1658mEE2o3NjkJ5vDLgYjCQu0Xlw= -github.com/agiledragon/gomonkey v2.0.2+incompatible/go.mod h1:2NGfXu1a80LLr2cmWXGBDaHEjb1idR6+FVlX5T3D9hw= +github.com/agiledragon/gomonkey/v2 v2.3.1 h1:k+UnUY0EMNYUFUAQVETGY9uUTxjMdnUkP0ARyJS1zzs= +github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= @@ -76,15 +85,17 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= -github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= +github.com/alicebob/miniredis/v2 v2.17.0 h1:EwLdrIS50uczw71Jc7iVSxZluTKj5nfSP8n7ARRnJy0= +github.com/alicebob/miniredis/v2 v2.17.0/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -97,40 +108,45 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod 
h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -141,7 +157,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dlmiddlecote/sqlstats v1.0.2 h1:gSU11YN23D/iY50A2zVYwgXgy072khatTsIW6UPjUtI= github.com/dlmiddlecote/sqlstats v1.0.2/go.mod h1:0CWaIh/Th+z2aI6Q9Jpfg/o21zmGxWhbByHgQSCUQvY= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -161,33 +176,38 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/getsentry/sentry-go v0.11.0 h1:qro8uttJGvNAMr5CLcFI9CHR0aDzXl0Vs3Pmw/oTPg8= github.com/getsentry/sentry-go v0.11.0/go.mod h1:KBQIxiZAetw62Cj8Ri964vAEWVdgfaUCn30Q3bCvANo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= -github.com/gin-contrib/gzip v0.0.2 h1:VMBkd4ZB1Hl7e1lOA5gEZ/qdD3d9vLIq57xKWgPCCV8= -github.com/gin-contrib/gzip v0.0.2/go.mod h1:YxxswVZIqOvcHEQpsSn+QF5guQtO1dCfy0shBPy4jFc= -github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/gzip v0.0.3 h1:etUaeesHhEORpZMp18zoOhepboiWnFtXrBZxszWUn4k= +github.com/gin-contrib/gzip v0.0.3/go.mod h1:YxxswVZIqOvcHEQpsSn+QF5guQtO1dCfy0shBPy4jFc= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gin-gonic/gin v1.7.2 h1:Tg03T9yM2xa8j6I3Z3oqLaQRSmKvxPd6g/2HJ6zICFA= -github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gin-gonic/gin v1.7.4/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= 
+github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs= +github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg= @@ -203,37 +223,31 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= +github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod 
h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.6.1 h1:W6TRDXt4WcWp4c4nf/G+6BkGdhiIo0k417gfr+V6u4I= -github.com/go-playground/validator/v10 v10.6.1/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk= +github.com/go-playground/validator/v10 v10.9.0 h1:NgTtmN58D0m8+UuxtYmGztBJB7VnPgjj221I1QHci2A= +github.com/go-playground/validator/v10 v10.9.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= github.com/go-redis/cache/v8 v8.4.1 h1:jq0fw7hcUJzsoS9qPYjXzcAT/rm19TRMB1vEMG3m950= github.com/go-redis/cache/v8 v8.4.1/go.mod h1:iyYQNUxMsz6cPfTX3h4sT4lUmDXV0mDuEyeAn2o1btI= github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY= @@ -248,21 +262,22 @@ github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22 github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -295,8 +310,6 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= -github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -309,13 +322,15 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -327,11 +342,17 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -340,23 +361,29 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod 
h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -365,14 +392,22 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= @@ -393,14 +428,14 @@ github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -426,8 +461,11 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -441,12 +479,11 @@ github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties 
v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -454,14 +491,20 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= @@ -472,7 +515,10 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod 
h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -480,14 +526,16 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -498,14 +546,12 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -516,6 +562,8 @@ github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISq github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -533,21 +581,29 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE= +github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/parnurzeal/gorequest v0.2.16 h1:T/5x+/4BT+nj+3eSknXmCTnEVGSzFzPGdpqmUVVZXHQ= github.com/parnurzeal/gorequest v0.2.16/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -557,11 +613,12 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= @@ -573,39 +630,42 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -621,15 +681,16 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -638,9 +699,9 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.0 h1:QRwDgoG8xX+kp69di68D+YYTCWfYEckbZRfUlEIAal0= -github.com/spf13/viper v1.8.0/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= github.com/steinfletcher/apitest v1.5.11 h1:bG3hq3sA4+oPHln3O/xQ6LzsQgN0J2WJl+6EpydQZ8Q= github.com/steinfletcher/apitest v1.5.11/go.mod h1:cf7Bneo52IIAgpqhP8xaLlzWgAiQ9fHtsDMjeDnZ3so= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -648,7 +709,6 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -660,30 +720,24 @@ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14 h1:PyYN9JH5jY9j6av01SpfRMb+1DWg/i3MbGOKPxJ2wjM= github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= -github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI= -github.com/swaggo/gin-swagger v1.3.0 h1:eOmp7r57oUgZPw2dJOjcGNMse9cvXcI4tTqBcnZtPsI= -github.com/swaggo/gin-swagger v1.3.0/go.mod 
h1:oy1BRA6WvgtCp848lhxce7BnWH4C8Bxa0m5SkWx+cS0= -github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= -github.com/swaggo/swag v1.6.7 h1:e8GC2xDllJZr3omJkm9YfmK0Y56+rMO3cg0JBKNz09s= -github.com/swaggo/swag v1.6.7/go.mod h1:xDhTyuFIujYiN3DKWC/H/83xcfHp+UE/IzWWampG7Zc= +github.com/swaggo/gin-swagger v1.3.3 h1:XHyYmeNVFG5PbyWHG4jXtxOm2P4kiZapDCWsyDDiQ/I= +github.com/swaggo/gin-swagger v1.3.3/go.mod h1:ymsZuGpbbu+S7ZoQ49QPpZoDBj6uqhb8WizgQPVgWl0= +github.com/swaggo/swag v1.7.4/go.mod h1:zD8h6h4SPv7t3l+4BKdRquqW1ASWjKZgT6Qv9z3kNqI= +github.com/swaggo/swag v1.7.6 h1:UbAqHyXkW2J+cDjs5S43MkuYR7a6stB7Am7SK8NBmRg= +github.com/swaggo/swag v1.7.6/go.mod h1:7vLqNYEtYoIsD14wXgy9oDS65MNiDANrPtbk9rnLuj0= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= github.com/ugorji/go v1.2.6 h1:tGiWC9HENWE2tqYycIqFTNorMmFRVhNwCpDOpWqnk8E= github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= -github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= @@ -714,14 +768,15 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e h1:oIpIX9VKxSCFrfjsKpluGbNPBGq9iNnT9crH781j9wY= -github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= -go.etcd.io/bbolt v1.3.2/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= +github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -740,14 +795,16 @@ go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiW go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= +go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -757,22 +814,24 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= 
-go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -799,7 +858,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod 
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -814,12 +872,13 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -833,13 +892,13 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -864,8 +923,15 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -877,7 +943,12 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -899,7 +970,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -909,16 +979,17 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -928,6 +999,7 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -952,16 +1024,33 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -970,8 +1059,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -991,19 +1081,15 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1041,8 +1127,12 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1070,7 +1160,18 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1120,7 +1221,29 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto 
v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1145,7 +1268,16 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1157,24 +1289,25 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 
v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= diff --git a/iam_suite_test.go b/iam_suite_test.go index 26e77a6a..9ab58d2a 100644 --- a/iam_suite_test.go +++ b/iam_suite_test.go @@ -13,7 +13,7 @@ package main_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/main.go b/main.go index b0abf74c..f4a164d3 100644 --- a/main.go +++ b/main.go @@ -11,6 +11,8 @@ package main import ( + _ "go.uber.org/automaxprocs" + "iam/cmd" ) diff --git a/pkg/abac/pdp/condition/and.go b/pkg/abac/pdp/condition/and.go index e7676c83..3e120b69 100644 --- a/pkg/abac/pdp/condition/and.go +++ b/pkg/abac/pdp/condition/and.go @@ -20,11 +20,14 @@ import ( // AndCondition 逻辑AND type AndCondition struct { - content []Condition + baseLogicalCondition } func NewAndCondition(content []Condition) Condition { - return &AndCondition{content: content} + return &AndCondition{ + baseLogicalCondition{ + content: content, + }} } func newAndCondition(field string, values []interface{}) (Condition, error) { @@ -43,7 +46,7 @@ func newAndCondition(field string, values []interface{}) (Condition, error) { conditions = append(conditions, condition) } - return &AndCondition{content: conditions}, nil + return &AndCondition{baseLogicalCondition{content: conditions}}, nil } // GetName 名称 @@ -51,15 +54,6 @@ func (c *AndCondition) GetName() string { return operator.AND } -// GetKeys 返回嵌套条件中所有包含的属性key -func (c *AndCondition) GetKeys() []string { - keys := make([]string, 0, len(c.content)) - for _, condition := range c.content { - keys = append(keys, condition.GetKeys()...) - } - return keys -} - // Eval 求值 func (c *AndCondition) Eval(ctx types.EvalContextor) bool { for _, condition := range c.content { diff --git a/pkg/abac/pdp/condition/and_test.go b/pkg/abac/pdp/condition/and_test.go index 29d70915..7ba2475a 100644 --- a/pkg/abac/pdp/condition/and_test.go +++ b/pkg/abac/pdp/condition/and_test.go @@ -11,7 +11,7 @@ package condition import ( - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/condition/operator" @@ -19,30 +19,33 @@ import ( var _ = Describe("And", func() { wantAndCondition := &AndCondition{ - content: []Condition{ - &StringEqualsCondition{ - baseCondition: baseCondition{ - Key: "system", - Value: []interface{}{"linux"}, + baseLogicalCondition{ + content: []Condition{ + &StringEqualsCondition{ + baseCondition: baseCondition{ + Key: "system", + Value: []interface{}{"linux"}, + }, }, - }, - &StringPrefixCondition{ - baseCondition: baseCondition{ - Key: "path", - Value: []interface{}{"/biz,1/"}, + &StringPrefixCondition{ + baseCondition: baseCondition{ + Key: "path", + Value: []interface{}{"/biz,1/"}, + }, }, - }, - }, + }}, } var c *AndCondition BeforeEach(func() { c1, _ := newStringEqualsCondition("k1", []interface{}{"a", "b"}) - c2, _ := newNumericEqualsCondition("k1", []interface{}{"b", "c"}) + c2, _ := newNumericEqualsCondition("k2", []interface{}{"b", "c"}) c = &AndCondition{ - []Condition{ - c1, - c2, + baseLogicalCondition{ + content: []Condition{ + c1, + c2, + }, }, } }) @@ -86,22 +89,6 @@ var _ = Describe("And", func() { assert.Equal(GinkgoT(), operator.AND, c.GetName()) }) - It("GetKeys", func() { - oc := AndCondition{ - content: []Condition{ - &StringEqualsCondition{ - baseCondition{ - Key: "hello", - }, - }, - }, - } - - keys := oc.GetKeys() - assert.Len(GinkgoT(), keys, 1) - assert.Equal(GinkgoT(), "hello", keys[0]) - }) - It("Eval", func() { // k1 in [a, b] AND k1 in [b, c] assert.True(GinkgoT(), c.Eval(strCtx("b"))) diff --git a/pkg/abac/pdp/condition/any_test.go b/pkg/abac/pdp/condition/any_test.go index e07eb594..91f0b907 100644 --- a/pkg/abac/pdp/condition/any_test.go +++ b/pkg/abac/pdp/condition/any_test.go @@ -11,7 +11,7 @@ package condition import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/pdp/condition/base_condition.go b/pkg/abac/pdp/condition/base_condition.go index adadbfe6..ee275ee2 100644 --- a/pkg/abac/pdp/condition/base_condition.go +++ b/pkg/abac/pdp/condition/base_condition.go @@ -24,6 +24,17 @@ func (c *baseCondition) GetKeys() []string { return []string{c.Key} } +func (c *baseCondition) HasKey(f keyMatchFunc) bool { + return f(c.Key) +} + +func (c *baseCondition) GetFirstMatchKeyValues(f keyMatchFunc) ([]interface{}, bool) { + if f(c.Key) { + return c.Value, true + } + return nil, false +} + // GetValues 如果Value中有参数, 获取参数的值 func (c *baseCondition) GetValues() []interface{} { return c.Value diff --git a/pkg/abac/pdp/condition/base_condition_test.go b/pkg/abac/pdp/condition/base_condition_test.go index 3c00a8bd..73a457cc 100644 --- a/pkg/abac/pdp/condition/base_condition_test.go +++ b/pkg/abac/pdp/condition/base_condition_test.go @@ -14,7 +14,7 @@ import ( "errors" "strings" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) @@ -127,6 +127,51 @@ var _ = Describe("BaseCondition", func() { }) }) + Describe("HasKey", func() { + It("ok", func() { + expectedKey := "test" + + c := baseCondition{ + Key: expectedKey, + Value: nil, + } + + assert.True(GinkgoT(), c.HasKey(func(key string) bool { + return key == expectedKey + })) + }) + }) + + Describe("GetFirstMatchKeyValues", func() { + It("ok", func() { + expectedValues := []interface{}{1, "ab", 3} + c := baseCondition{ + Key: "test", + Value: expectedValues, + } + + v, ok := c.GetFirstMatchKeyValues(func(key string) bool { + return key == "test" + }) + assert.True(GinkgoT(), ok) + + assert.Equal(GinkgoT(), expectedValues, v) + }) + + It("ont ok", func() { + expectedValues := []interface{}{1, "ab", 3} + c := baseCondition{ + Key: "test", + Value: expectedValues, + } + + _, ok := c.GetFirstMatchKeyValues(func(key string) bool { + return key == "abc" + }) + assert.False(GinkgoT(), ok) + }) + }) + Describe("GetValues", func() { It("ok", func() { expectedValues := []interface{}{1, "ab", 3} diff --git a/pkg/abac/pdp/condition/base_logical_condition.go b/pkg/abac/pdp/condition/base_logical_condition.go new file mode 100644 index 00000000..bf4c73fd --- /dev/null +++ b/pkg/abac/pdp/condition/base_logical_condition.go @@ -0,0 +1,33 @@ +package condition + +type baseLogicalCondition struct { + content []Condition +} + +// GetKeys 返回嵌套条件中所有包含的属性key +func (c *baseLogicalCondition) GetKeys() []string { + keys := make([]string, 0, len(c.content)) + for _, condition := range c.content { + keys = append(keys, condition.GetKeys()...) + } + return keys +} + +func (c *baseLogicalCondition) HasKey(f keyMatchFunc) bool { + for _, condition := range c.content { + if condition.HasKey(f) { + return true + } + } + return false +} + +func (c *baseLogicalCondition) GetFirstMatchKeyValues(f keyMatchFunc) ([]interface{}, bool) { + for _, condition := range c.content { + // got the first one + if values, ok := condition.GetFirstMatchKeyValues(f); ok { + return values, ok + } + } + return nil, false +} diff --git a/pkg/abac/pdp/condition/base_logical_condition_test.go b/pkg/abac/pdp/condition/base_logical_condition_test.go new file mode 100644 index 00000000..067ce42c --- /dev/null +++ b/pkg/abac/pdp/condition/base_logical_condition_test.go @@ -0,0 +1,61 @@ +package condition + +import ( + . 
"github.com/onsi/ginkgo/v2" + "github.com/stretchr/testify/assert" +) + +var _ = Describe("BaseLogicalCondition", func() { + var c *baseLogicalCondition + BeforeEach(func() { + c1, _ := newStringEqualsCondition("k1", []interface{}{"a", "b"}) + c2, _ := newNumericEqualsCondition("k2", []interface{}{"b", "c"}) + c = &baseLogicalCondition{ + content: []Condition{ + c1, + c2, + }, + } + }) + + It("GetKeys", func() { + keys := c.GetKeys() + assert.Len(GinkgoT(), keys, 2) + assert.Contains(GinkgoT(), keys, "k1") + assert.Contains(GinkgoT(), keys, "k2") + }) + + Describe("HasKey", func() { + It("ok", func() { + ok1 := c.HasKey(func(key string) bool { + return key == "k1" + }) + assert.True(GinkgoT(), ok1) + }) + + It("not ok", func() { + ok2 := c.HasKey(func(key string) bool { + return key == "k3" + }) + assert.False(GinkgoT(), ok2) + }) + }) + + Describe("GetFirstMatchKeyValues", func() { + It("ok", func() { + v, ok := c.GetFirstMatchKeyValues(func(key string) bool { + return key == "k1" + }) + assert.True(GinkgoT(), ok) + assert.Equal(GinkgoT(), []interface{}{"a", "b"}, v) + }) + + It("not ok", func() { + _, ok := c.GetFirstMatchKeyValues(func(key string) bool { + return key == "k3" + }) + assert.False(GinkgoT(), ok) + }) + + }) +}) diff --git a/pkg/abac/pdp/condition/bool_test.go b/pkg/abac/pdp/condition/bool_test.go index 18db59c3..3d326f1e 100644 --- a/pkg/abac/pdp/condition/bool_test.go +++ b/pkg/abac/pdp/condition/bool_test.go @@ -11,7 +11,7 @@ package condition import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/pdp/condition/condition_suite_test.go b/pkg/abac/pdp/condition/condition_suite_test.go index ef7f456c..8dc4d8de 100644 --- a/pkg/abac/pdp/condition/condition_suite_test.go +++ b/pkg/abac/pdp/condition/condition_suite_test.go @@ -13,7 +13,7 @@ package condition_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/pdp/condition/init.go b/pkg/abac/pdp/condition/init.go index 03347f9c..19aba39d 100644 --- a/pkg/abac/pdp/condition/init.go +++ b/pkg/abac/pdp/condition/init.go @@ -25,16 +25,13 @@ import ( 1. 条件之间没有隐含的关系 每个条件都是 {"operator": {"filed": values}} */ -const ( - iamPath = "_bk_iam_path_" - iamPathSuffix = "." + iamPath -) - var errMustNotEmpty = errors.New("value must not be empty") // conditionFunc define the func which keyword match to func be called type conditionFunc func(key string, values []interface{}) (Condition, error) +type keyMatchFunc func(key string) bool + var conditionFactories map[string]conditionFunc func init() { @@ -58,6 +55,11 @@ type Condition interface { GetName() string GetKeys() []string // 返回条件中包含的所有属性key + // HasKey return true if match key, use a keyMatchFunc to verify if matched + HasKey(f keyMatchFunc) bool + // GetFirstMatchKeyValues retrieve the first match key's values from condition + GetFirstMatchKeyValues(f keyMatchFunc) ([]interface{}, bool) + Eval(ctx types.EvalContextor) bool Translate(withSystem bool) (map[string]interface{}, error) } diff --git a/pkg/abac/pdp/condition/init_test.go b/pkg/abac/pdp/condition/init_test.go index 63321c19..191ffbd5 100644 --- a/pkg/abac/pdp/condition/init_test.go +++ b/pkg/abac/pdp/condition/init_test.go @@ -11,7 +11,7 @@ package condition import ( - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/types" @@ -20,17 +20,19 @@ import ( var _ = Describe("Condition", func() { wantAndCondition := &AndCondition{ - content: []Condition{ - &StringEqualsCondition{ - baseCondition: baseCondition{ - Key: "system", - Value: []interface{}{"linux"}, + baseLogicalCondition{ + content: []Condition{ + &StringEqualsCondition{ + baseCondition: baseCondition{ + Key: "system", + Value: []interface{}{"linux"}, + }, }, - }, - &StringPrefixCondition{ - baseCondition: baseCondition{ - Key: "path", - Value: []interface{}{"/biz,1/"}, + &StringPrefixCondition{ + baseCondition: baseCondition{ + Key: "path", + Value: []interface{}{"/biz,1/"}, + }, }, }, }, diff --git a/pkg/abac/pdp/condition/numeric_cmp.go b/pkg/abac/pdp/condition/numeric_cmp.go index b0306a64..15ee8f44 100644 --- a/pkg/abac/pdp/condition/numeric_cmp.go +++ b/pkg/abac/pdp/condition/numeric_cmp.go @@ -53,7 +53,7 @@ func newNumericCompareCondition( func newNumericEqualsCondition(key string, values []interface{}) (Condition, error) { return newNumericCompareCondition(key, values, - operator.NumericEquals, "eq", "in", eval.Equal) + operator.NumericEquals, "eq", "in", eval.ValueEqual) } func newNumericGreaterThanCondition(key string, values []interface{}) (Condition, error) { diff --git a/pkg/abac/pdp/condition/numeric_cmp_test.go b/pkg/abac/pdp/condition/numeric_cmp_test.go index 262ad3ce..e6ca3bd3 100644 --- a/pkg/abac/pdp/condition/numeric_cmp_test.go +++ b/pkg/abac/pdp/condition/numeric_cmp_test.go @@ -12,7 +12,7 @@ package condition import ( "github.com/TencentBlueKing/iam-go-sdk/expression/eval" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/condition/operator" @@ -109,6 +109,25 @@ var _ = Describe("NumericCompare", func() { assert.True(GinkgoT(), lteCondition.Eval(intCtx(2))) }) + It("true, different type", func() { + assert.True(GinkgoT(), eqCondition.Eval(int64Ctx(1))) + assert.True(GinkgoT(), eqCondition.Eval(int64Ctx(2))) + + assert.True(GinkgoT(), gtCondition.Eval(int64Ctx(3))) + assert.True(GinkgoT(), gtCondition.Eval(int64Ctx(4))) + + assert.True(GinkgoT(), gteCondition.Eval(int64Ctx(2))) + assert.True(GinkgoT(), gteCondition.Eval(int64Ctx(3))) + assert.True(GinkgoT(), gteCondition.Eval(int64Ctx(4))) + + assert.True(GinkgoT(), ltCondition.Eval(int64Ctx(0))) + assert.True(GinkgoT(), ltCondition.Eval(int64Ctx(1))) + + assert.True(GinkgoT(), lteCondition.Eval(int64Ctx(0))) + assert.True(GinkgoT(), lteCondition.Eval(int64Ctx(1))) + assert.True(GinkgoT(), lteCondition.Eval(int64Ctx(2))) + }) + It("false", func() { assert.False(GinkgoT(), eqCondition.Eval(intCtx(3))) diff --git a/pkg/abac/pdp/condition/or.go b/pkg/abac/pdp/condition/or.go index 08b0258b..72e48f91 100644 --- a/pkg/abac/pdp/condition/or.go +++ b/pkg/abac/pdp/condition/or.go @@ -20,11 +20,15 @@ import ( // OrCondition 逻辑OR type OrCondition struct { - content []Condition + baseLogicalCondition } func NewOrCondition(content []Condition) Condition { - return &OrCondition{content: content} + return &OrCondition{ + baseLogicalCondition{ + content: content, + }, + } } func newOrCondition(field string, values []interface{}) (Condition, error) { @@ -47,7 +51,7 @@ func newOrCondition(field string, values []interface{}) (Condition, error) { conditions = append(conditions, condition) } - return &OrCondition{content: conditions}, nil + return &OrCondition{baseLogicalCondition{content: conditions}}, nil } // GetName 名称 @@ -55,15 +59,6 @@ func 
(c *OrCondition) GetName() string { return operator.OR } -// GetKeys 返回嵌套条件中所有包含的属性key -func (c *OrCondition) GetKeys() []string { - keys := make([]string, 0, len(c.content)) - for _, condition := range c.content { - keys = append(keys, condition.GetKeys()...) - } - return keys -} - // Eval 求值 func (c *OrCondition) Eval(ctx types.EvalContextor) bool { for _, condition := range c.content { diff --git a/pkg/abac/pdp/condition/or_test.go b/pkg/abac/pdp/condition/or_test.go index e119bfcd..aaee0828 100644 --- a/pkg/abac/pdp/condition/or_test.go +++ b/pkg/abac/pdp/condition/or_test.go @@ -11,7 +11,7 @@ package condition import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/condition/operator" @@ -19,17 +19,19 @@ import ( var _ = Describe("Or", func() { wantOrCondition := &OrCondition{ - content: []Condition{ - &StringEqualsCondition{ - baseCondition: baseCondition{ - Key: "system", - Value: []interface{}{"linux"}, + baseLogicalCondition{ + content: []Condition{ + &StringEqualsCondition{ + baseCondition: baseCondition{ + Key: "system", + Value: []interface{}{"linux"}, + }, }, - }, - &StringPrefixCondition{ - baseCondition: baseCondition{ - Key: "path", - Value: []interface{}{"/biz,1/"}, + &StringPrefixCondition{ + baseCondition: baseCondition{ + Key: "path", + Value: []interface{}{"/biz,1/"}, + }, }, }, }, @@ -40,9 +42,11 @@ var _ = Describe("Or", func() { c1, _ := newStringEqualsCondition("k1", []interface{}{"a", "b"}) c2, _ := newNumericEqualsCondition("k2", []interface{}{123}) c = &OrCondition{ - []Condition{ - c1, - c2, + baseLogicalCondition{ + []Condition{ + c1, + c2, + }, }, } }) @@ -93,22 +97,6 @@ var _ = Describe("Or", func() { assert.False(GinkgoT(), c.Eval(intCtx(456))) }) - It("GetKeys", func() { - oc := OrCondition{ - content: []Condition{ - &StringEqualsCondition{ - baseCondition{ - Key: "hello", - }, - }, - }, - } - - keys := oc.GetKeys() - assert.Len(GinkgoT(), keys, 1) - assert.Equal(GinkgoT(), "hello", keys[0]) - }) - Describe("Translate", func() { It("ok, empty", func() { want := map[string]interface{}{ @@ -234,7 +222,7 @@ var _ = Describe("Or", func() { allowed, nc := c.(LogicalCondition).PartialEval(HitStrCtx("windows")) assert.False(GinkgoT(), allowed) assert.Nil(GinkgoT(), nc) - //assert.Equal(GinkgoT(), NewAnyCondition(), nc) + // assert.Equal(GinkgoT(), NewAnyCondition(), nc) }) It("true", func() { @@ -410,7 +398,7 @@ var _ = Describe("Or", func() { "host.system": "windows", }) assert.True(GinkgoT(), allowed) - //assert.Nil(GinkgoT(), nc) + // assert.Nil(GinkgoT(), nc) ct, err := nc.Translate(true) assert.NoError(GinkgoT(), err) got := map[string]interface{}{"field": "subject.type", "op": "in", "value": []interface{}{"mysql", "linux"}} diff --git a/pkg/abac/pdp/condition/string_equals_test.go b/pkg/abac/pdp/condition/string_equals_test.go index b410d466..30b641d1 100644 --- a/pkg/abac/pdp/condition/string_equals_test.go +++ b/pkg/abac/pdp/condition/string_equals_test.go @@ -11,7 +11,7 @@ package condition import ( - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/pdp/condition/string_prefix.go b/pkg/abac/pdp/condition/string_prefix.go index 54057cc0..5e61cc15 100644 --- a/pkg/abac/pdp/condition/string_prefix.go +++ b/pkg/abac/pdp/condition/string_prefix.go @@ -15,6 +15,7 @@ import ( "iam/pkg/abac/pdp/condition/operator" "iam/pkg/abac/pdp/types" + abacTypes "iam/pkg/abac/types" ) // StringPrefixCondition 字符串前缀匹配 @@ -51,7 +52,7 @@ func (c *StringPrefixCondition) Eval(ctx types.EvalContextor) bool { // 支持表达式中最后一个节点为任意 // /biz,1/set,*/ -> /biz,1/set, - if strings.HasSuffix(c.Key, iamPathSuffix) && strings.HasSuffix(bStr, ",*/") { + if strings.HasSuffix(c.Key, abacTypes.IamPathSuffix) && strings.HasSuffix(bStr, ",*/") { bStr = bStr[0 : len(bStr)-2] } diff --git a/pkg/abac/pdp/condition/string_prefix_test.go b/pkg/abac/pdp/condition/string_prefix_test.go index ae3235d6..3c522aae 100644 --- a/pkg/abac/pdp/condition/string_prefix_test.go +++ b/pkg/abac/pdp/condition/string_prefix_test.go @@ -11,7 +11,9 @@ package condition import ( - . "github.com/onsi/ginkgo" + abacTypes "iam/pkg/abac/types" + + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) @@ -74,7 +76,7 @@ var _ = Describe("StringPrefix", func() { It("_bk_iam_path_", func() { c = &StringPrefixCondition{ baseCondition{ - Key: "bk_test" + iamPathSuffix, + Key: "bk_test" + abacTypes.IamPathSuffix, Value: []interface{}{"/biz,1/set,*/"}, }, } diff --git a/pkg/abac/pdp/entrance.go b/pkg/abac/pdp/entrance.go index fc92c608..1aae3d48 100644 --- a/pkg/abac/pdp/entrance.go +++ b/pkg/abac/pdp/entrance.go @@ -14,12 +14,13 @@ import ( "database/sql" "errors" "fmt" + "time" "github.com/TencentBlueKing/gopkg/errorx" + "iam/pkg/abac/pdp/evalctx" "iam/pkg/abac/pdp/evaluation" "iam/pkg/abac/pdp/translate" - pdptypes "iam/pkg/abac/pdp/types" "iam/pkg/abac/types" "iam/pkg/abac/types/request" "iam/pkg/cacheimpls" @@ -44,7 +45,11 @@ PDP 模块鉴权入口结构与鉴权函数定义 */ // PDP ... -const PDP = "PDP" +const ( + PDP = "PDP" + + DefaultTz = "Asia/Shanghai" +) // EmptyPolicies ... var ( @@ -138,8 +143,12 @@ func Eval( } debug.AddStep(entry, "Eval") + if entry != nil { + envs, _ := evalctx.GenTimeEnvsFromCache(DefaultTz, time.Now()) + debug.WithValue(entry, "env", envs) + } var passPolicyID int64 - isPass, passPolicyID, err = evaluation.EvalPolicies(pdptypes.NewEvalContext(r), policies) + isPass, passPolicyID, err = evaluation.EvalPolicies(evalctx.NewEvalContext(r), policies) if err != nil { err = errorWrapf(err, "single local evaluation.EvalPolicies policies=`%+v`, request=`%+v` fail", policies, *r) diff --git a/pkg/abac/pdp/entrance_test.go b/pkg/abac/pdp/entrance_test.go index 0f2d14fc..a4ce2813 100644 --- a/pkg/abac/pdp/entrance_test.go +++ b/pkg/abac/pdp/entrance_test.go @@ -14,15 +14,15 @@ import ( "errors" "reflect" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/condition" + "iam/pkg/abac/pdp/evalctx" "iam/pkg/abac/pdp/evaluation" "iam/pkg/abac/pdp/translate" - pdptypes "iam/pkg/abac/pdp/types" "iam/pkg/abac/types" "iam/pkg/abac/types/request" "iam/pkg/logging/debug" @@ -54,10 +54,6 @@ var _ = Describe("Entrance", func() { } patches = gomonkey.NewPatches() - patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", - func(_ *request.Request) bool { - return true - }) }) AfterEach(func() { ctl.Finish() @@ -94,6 +90,10 @@ var _ = Describe("Entrance", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return errors.New("fill subject fail") }) @@ -109,6 +109,10 @@ var _ = Describe("Entrance", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -131,6 +135,10 @@ var _ = Describe("Entrance", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -143,7 +151,7 @@ var _ = Describe("Entrance", func() { return []types.AuthPolicy{}, nil }) patches.ApplyFunc(evaluation.EvalPolicies, func( - ctx *pdptypes.EvalContext, policies []types.AuthPolicy, + ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) (isPass bool, policyID int64, err error) { return true, 1, nil }) @@ -157,6 +165,10 @@ var _ = Describe("Entrance", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -169,7 +181,7 @@ var _ = Describe("Entrance", func() { return []types.AuthPolicy{}, nil }) patches.ApplyFunc(evaluation.EvalPolicies, func( - ctx *pdptypes.EvalContext, policies []types.AuthPolicy, + ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) (isPass bool, policyID int64, err error) { return false, -1, nil }) @@ -183,6 +195,10 @@ var _ = Describe("Entrance", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -195,7 +211,7 @@ var _ = Describe("Entrance", func() { return []types.AuthPolicy{}, nil }) patches.ApplyFunc(evaluation.EvalPolicies, func( - ctx *pdptypes.EvalContext, policies []types.AuthPolicy, + ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) (isPass bool, policyID int64, err error) { return false, -1, errors.New("eval fail") }) @@ -211,6 +227,10 @@ var _ = Describe("Entrance", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", + func(_ 
*request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -223,7 +243,7 @@ var _ = Describe("Entrance", func() { return []types.AuthPolicy{}, nil }) patches.ApplyFunc(evaluation.EvalPolicies, func( - ctx *pdptypes.EvalContext, policies []types.AuthPolicy, + ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) (isPass bool, policyID int64, err error) { return false, -1, errors.New("test") }) @@ -237,6 +257,10 @@ var _ = Describe("Entrance", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -249,7 +273,7 @@ var _ = Describe("Entrance", func() { return []types.AuthPolicy{}, nil }) patches.ApplyFunc(evaluation.EvalPolicies, func( - ctx *pdptypes.EvalContext, policies []types.AuthPolicy, + ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) (isPass bool, policyID int64, err error) { return true, 1, nil }) diff --git a/pkg/abac/pdp/evalctx/context.go b/pkg/abac/pdp/evalctx/context.go new file mode 100644 index 00000000..580551d7 --- /dev/null +++ b/pkg/abac/pdp/evalctx/context.go @@ -0,0 +1,172 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available. + * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package evalctx + +import ( + "fmt" + "strconv" + "strings" + "time" + + "iam/pkg/abac/pdp/condition" + pdptypes "iam/pkg/abac/pdp/types" + "iam/pkg/abac/types" + "iam/pkg/abac/types/request" +) + +/* +PDP模块表达式求值 +*/ + +// EvalContext 表达式求值上下文 +// 只有一个Resource的信息 +type EvalContext struct { + *request.Request + objSet pdptypes.ObjectSetInterface +} + +// NewEvalContext new context +func NewEvalContext(req *request.Request) *EvalContext { + objSet := pdptypes.NewObjectSet() + + for _, r := range req.Resources { + // maybe nil here + if r.Attribute == nil { + r.Attribute = types.Attribute{} + } + // set id into attributes + r.Attribute.Set("id", r.ID) + + // bk_job.script => attributes + _type := r.System + "." 
+ r.Type + objSet.Set(_type, r.Attribute) + } + // TODO: 需要限制接入系统资源id字段不能配置为attribute; 因为会被覆盖 + return &EvalContext{ + Request: req, + objSet: objSet, + } +} + +// GetAttr 获取资源的属性值 +func (c *EvalContext) GetAttr(name string) (interface{}, error) { + // name should be {system}.{resource_type}.{attr_key} + return c.objSet.GetAttribute(name), nil +} + +func (c *EvalContext) HasResource(_type string) bool { + // has {system}.{resource_type} + return c.objSet.Has(_type) +} + +func (c *EvalContext) SetEnv(envs map[string]interface{}) { + c.objSet.Set(c.System+types.IamEnvSuffix, envs) +} + +func (c *EvalContext) UnsetEnv() { + c.objSet.Del(c.System + types.IamEnvSuffix) +} + +func (c *EvalContext) HasEnv() bool { + return c.objSet.Has(c.System + types.IamEnvSuffix) +} + +func (c *EvalContext) InitEnvironments(cond condition.Condition, currentTime time.Time) error { + // build envs + c.UnsetEnv() + + hasEnvFunc := func(key string) bool { + return strings.Contains(key, types.IamEnvSuffix) + } + // 限制: 一条策略, 有多个环境属性, 但是只能有一个tz(逻辑计算无法在同一个表达式中支持多个时区) + hasEnvTzFunc := func(key string) bool { + return strings.HasSuffix(key, types.IamEnvTzSuffix) + } + + if cond.HasKey(hasEnvFunc) { + // NOTE: 开启环境属性, 不一定会有tz, 而是 有配置时间相关环境属性, 一定会配置tz + if tzValues, exists := cond.GetFirstMatchKeyValues(hasEnvTzFunc); exists { + if len(tzValues) != 1 { + return fmt.Errorf("pdp ctx initEnvironments got not tz in condition") + } + + tz, ok := tzValues[0].(string) + if !ok { + return fmt.Errorf("pdp ctx initEnvironments got tz not a string") + } + + envs, err := GenTimeEnvsFromCache(tz, currentTime) + if err != nil { + return fmt.Errorf("pdp gen envs fail, %w", err) + } + c.SetEnv(envs) + } + + // NOTE: if got more envs, build it here before set + + // e.g. + /* + { + // basic, should all have tz field + "tz": "Asia/Shanghai", + // now + "hms": 172910, + // later: + "ts": 1638523704, + "weekday": 3, + "monthday": 29, + "month": 12, + } + */ + } + return nil +} + +// GenTimeEnvsFromCache will return the same time-related envs if the tz and timestamp are same! +// NOTE: cache only if the envs is same for every request +// if you will change the envs later(e.g. set some value from request, do not cache it!) 
+// at that time, you should remove this func, use a new collection like sync.Pool +func GenTimeEnvsFromCache(tz string, currentTime time.Time) (map[string]interface{}, error) { + key := tz + strconv.FormatInt(currentTime.Unix(), 10) + + cachedEnvs, ok := localTimeEnvsCache.Get(key) + // hit + if ok { + return cachedEnvs.(map[string]interface{}), nil + } + // miss + envs, err := genTimeEnvs(tz, currentTime) + if err != nil { + return nil, err + } + + localTimeEnvsCache.SetDefault(key, envs) + return envs, nil +} + +func genTimeEnvs(tz string, currentTime time.Time) (map[string]interface{}, error) { + loc, err := time.LoadLocation(tz) + if err != nil { + return nil, fmt.Errorf("pdp load policy timezone location fail, tz=%s, %w", tz, err) + } + + t := currentTime.In(loc) + + // hms means hour-minute-second, transfer 08:30:20 to 83020; 10:41:21 to 104121 + hms := int64(10000*t.Hour() + 100*t.Minute() + t.Second()) + + envs := map[string]interface{}{ + "tz": tz, + "hms": hms, + // "ts": t.Unix(), + } + return envs, nil +} diff --git a/pkg/abac/pdp/evalctx/context_test.go b/pkg/abac/pdp/evalctx/context_test.go new file mode 100644 index 00000000..93239d8a --- /dev/null +++ b/pkg/abac/pdp/evalctx/context_test.go @@ -0,0 +1,351 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available. + * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package evalctx + +import ( + "fmt" + "strconv" + "testing" + "time" + + "iam/pkg/abac/pdp/condition" + pdptypes "iam/pkg/abac/pdp/types" + + . 
"github.com/onsi/ginkgo/v2" + gocache "github.com/patrickmn/go-cache" + "github.com/stretchr/testify/assert" + + "iam/pkg/abac/types" + "iam/pkg/abac/types/request" +) + +var _ = Describe("Context", func() { + + var req *request.Request + var c *EvalContext + BeforeEach(func() { + req = &request.Request{ + System: "iam", + Subject: types.Subject{ + Type: "user", + ID: "admin", + }, + Action: types.Action{ + ID: "execute_job", + }, + Resources: []types.Resource{ + { + + System: "iam", + Type: "job", + ID: "job1", + Attribute: map[string]interface{}{"key": "value1"}, + }, + }, + } + c = NewEvalContext(req) + }) + + Describe("NewEvalContext", func() { + It("no resources", func() { + req := &request.Request{} + ec := NewEvalContext(req) + assert.NotNil(GinkgoT(), ec) + }) + + It("ok, has resource", func() { + ec := NewEvalContext(req) + assert.NotNil(GinkgoT(), ec) + }) + + It("ok, has resource, attribute nil", func() { + req := &request.Request{ + Resources: []types.Resource{ + { + ID: "test", + Attribute: nil, + }, + }, + } + ec := NewEvalContext(req) + assert.NotNil(GinkgoT(), ec) + }) + + }) + + Describe("GetAttr", func() { + It("ok", func() { + a, err := c.GetAttr("iam.job.id") + assert.NoError(GinkgoT(), err) + assert.Equal(GinkgoT(), "job1", a) + }) + + It("miss", func() { + a, err := c.GetAttr("bk_cmdb.job.id") + assert.NoError(GinkgoT(), err) + assert.Nil(GinkgoT(), a) + }) + }) + + Describe("HasResource", func() { + It("ok", func() { + assert.True(GinkgoT(), c.HasResource("iam.job")) + }) + + It("miss", func() { + assert.False(GinkgoT(), c.HasResource("bk_cmdb.job")) + + }) + }) + + Describe("SetEnv", func() { + It("ok", func() { + c.SetEnv(map[string]interface{}{"ts": 123}) + + ts, err := c.GetAttr("iam._bk_iam_env_.ts") + assert.NoError(GinkgoT(), err) + assert.Equal(GinkgoT(), 123, ts) + }) + }) + + Describe("UnsetEnv", func() { + It("ok", func() { + c.SetEnv(map[string]interface{}{"ts": 123}) + + ts, err := c.GetAttr("iam._bk_iam_env_.ts") + assert.NoError(GinkgoT(), err) + assert.Equal(GinkgoT(), 123, ts) + + c.UnsetEnv() + + assert.False(GinkgoT(), c.HasResource("iam._bk_iam_env_")) + }) + }) + + Describe("HasEnv", func() { + It("ok", func() { + assert.False(GinkgoT(), c.HasEnv()) + c.SetEnv(map[string]interface{}{"ts": 123}) + assert.True(GinkgoT(), c.HasEnv()) + }) + }) + + Describe("InitEnvironments", func() { + + var noEnvCond condition.Condition + var notEnvTimeCond condition.Condition + var envTimeCond condition.Condition + BeforeEach(func() { + // init the cache + localTimeEnvsCache = gocache.New(10*time.Second, 30*time.Second) + + c1 := pdptypes.PolicyCondition{ + "AND": map[string][]interface{}{ + "content": { + map[string]interface{}{"StringEquals": map[string]interface{}{"iam.system": []interface{}{"linux"}}}, + map[string]interface{}{"StringPrefix": map[string]interface{}{"iam.path": []interface{}{"/biz,1/"}}}, + }, + }, + } + noEnvCond, _ = condition.NewConditionFromPolicyCondition(c1) + + c2 := pdptypes.PolicyCondition{ + "AND": map[string][]interface{}{ + "content": { + map[string]interface{}{"StringEquals": map[string]interface{}{"iam.host.system": []interface{}{"linux"}}}, + map[string]interface{}{"StringPrefix": map[string]interface{}{"iam.host.path": []interface{}{"/biz,1/"}}}, + map[string]interface{}{"StringEquals": map[string]interface{}{"iam._bk_iam_env_.system": []interface{}{"iam"}}}, + }, + }, + } + notEnvTimeCond, _ = condition.NewConditionFromPolicyCondition(c2) + + c3 := pdptypes.PolicyCondition{ + "AND": map[string][]interface{}{ + "content": { + 
map[string]interface{}{"StringEquals": map[string]interface{}{"iam.host.system": []interface{}{"linux"}}}, + map[string]interface{}{"StringPrefix": map[string]interface{}{"iam.host.path": []interface{}{"/biz,1/"}}}, + map[string]interface{}{"StringEquals": map[string]interface{}{"iam._bk_iam_env_.tz": []interface{}{"Asia/Shanghai"}}}, + map[string]interface{}{"NumericLt": map[string]interface{}{"iam._bk_iam_env_.hms": []interface{}{163630}}}, + }, + }, + } + envTimeCond, _ = condition.NewConditionFromPolicyCondition(c3) + }) + AfterEach(func() { + c.UnsetEnv() + }) + + It("has no envs", func() { + err := c.InitEnvironments(noEnvCond, time.Now()) + assert.NoError(GinkgoT(), err) + + assert.False(GinkgoT(), c.HasResource("iam._bk_iam_env_")) + }) + + It("has env, not time-related", func() { + err := c.InitEnvironments(notEnvTimeCond, time.Now()) + assert.NoError(GinkgoT(), err) + + assert.False(GinkgoT(), c.HasResource("iam._bk_iam_env_")) + }) + + It("has time-related env, ok", func() { + tz := "Asia/Shanghai" + + loc, _ := time.LoadLocation(tz) + t, _ := time.ParseInLocation("2006-01-02 15:04:05 Z0700 MST", "2021-12-03 15:54:06 +0800 CST", loc) + hms := int64(155406) + + fmt.Println("print the c, is nil? ", c, c == nil) + + err := c.InitEnvironments(envTimeCond, t) + assert.NoError(GinkgoT(), err) + + assert.True(GinkgoT(), c.HasResource("iam._bk_iam_env_")) + + tzA, err := c.GetAttr("iam._bk_iam_env_.tz") + assert.Equal(GinkgoT(), "Asia/Shanghai", tzA) + hmsA, err := c.GetAttr("iam._bk_iam_env_.hms") + assert.Equal(GinkgoT(), hms, hmsA) + }) + + It("has time-related env, 2 tz", func() { + c3 := pdptypes.PolicyCondition{ + "AND": map[string][]interface{}{ + "content": { + map[string]interface{}{"StringEquals": map[string]interface{}{"iam._bk_iam_env_.tz": []interface{}{"Asia/Shanghai", "America/New_York"}}}, + map[string]interface{}{"NumericLt": map[string]interface{}{"iam._bk_iam_env_.hms": []interface{}{163630}}}, + }, + }, + } + cond, _ := condition.NewConditionFromPolicyCondition(c3) + err := c.InitEnvironments(cond, time.Now()) + assert.Error(GinkgoT(), err) + }) + + It("has time-related env, tz wrong type", func() { + c3 := pdptypes.PolicyCondition{ + "AND": map[string][]interface{}{ + "content": { + map[string]interface{}{"StringEquals": map[string]interface{}{"iam._bk_iam_env_.tz": []interface{}{123}}}, + map[string]interface{}{"NumericLt": map[string]interface{}{"iam._bk_iam_env_.hms": []interface{}{163630}}}, + }, + }, + } + cond, _ := condition.NewConditionFromPolicyCondition(c3) + err := c.InitEnvironments(cond, time.Now()) + assert.Error(GinkgoT(), err) + }) + + It("has time-related env, tz wrong", func() { + c3 := pdptypes.PolicyCondition{ + "AND": map[string][]interface{}{ + "content": { + map[string]interface{}{"StringEquals": map[string]interface{}{"iam._bk_iam_env_.tz": []interface{}{"wrong"}}}, + map[string]interface{}{"NumericLt": map[string]interface{}{"iam._bk_iam_env_.hms": []interface{}{163630}}}, + }, + }, + } + cond, _ := condition.NewConditionFromPolicyCondition(c3) + err := c.InitEnvironments(cond, time.Now()) + assert.Error(GinkgoT(), err) + }) + }) + + Describe("envs time", func() { + + var tz string + var t time.Time + var hms int64 + BeforeEach(func() { + tz = "Asia/Shanghai" + + loc, _ := time.LoadLocation(tz) + t, _ = time.ParseInLocation("2006-01-02 15:04:05 Z0700 MST", "2021-12-03 15:54:06 +0800 CST", loc) + hms = int64(155406) + }) + + Describe("GenTimeEnvsFromCache", func() { + It("ok", func() { + envs, err := GenTimeEnvsFromCache(tz, t) + 
assert.NoError(GinkgoT(), err) + + assert.Len(GinkgoT(), envs, 2) + assert.Equal(GinkgoT(), tz, envs["tz"]) + assert.Equal(GinkgoT(), hms, envs["hms"]) + + envs2, err := GenTimeEnvsFromCache(tz, t) + assert.NoError(GinkgoT(), err) + assert.Equal(GinkgoT(), envs, envs2) + }) + + It("fail", func() { + tz := "Wrong" + _, err := GenTimeEnvsFromCache(tz, time.Now()) + assert.Error(GinkgoT(), err) + }) + }) + + Describe("genTimeEnvs", func() { + It("ok", func() { + envs, err := genTimeEnvs(tz, t) + assert.NoError(GinkgoT(), err) + + assert.Len(GinkgoT(), envs, 2) + assert.Equal(GinkgoT(), tz, envs["tz"]) + assert.Equal(GinkgoT(), hms, envs["hms"]) + }) + + It("fail", func() { + tz := "Wrong" + _, err := genTimeEnvs(tz, time.Now()) + assert.Error(GinkgoT(), err) + }) + + }) + + }) + +}) + +func BenchmarkGenEnvsInReal(b *testing.B) { + tz := "Asia/Shanghai" + currentTime := time.Now() + + for i := 0; i < b.N; i++ { + genTimeEnvs(tz, currentTime) + } +} +func BenchmarkGenEnvsFromSyncMap(b *testing.B) { + tz := "Asia/Shanghai" + currentTime := time.Now() + + m := gocache.New(10*time.Second, 20*time.Second) + // m := sync.Map{} + // for _, x := range a { + // m.Store(x, strconv.FormatInt(x, 10)) + // } + + for i := 0; i < b.N; i++ { + key := tz + strconv.FormatInt(currentTime.Unix(), 10) + // key := fmt.Sprintf("%s%d", tz, currentTime.Unix()) + + _, ok := m.Get(key) + if !ok { + envs, err := genTimeEnvs(tz, currentTime) + if err == nil { + m.Set(key, envs, 0) + // m.Store(key, envs) + } + } + } +} diff --git a/pkg/abac/pdp/evalctx/evalctx_suite_test.go b/pkg/abac/pdp/evalctx/evalctx_suite_test.go new file mode 100644 index 00000000..ea54dc27 --- /dev/null +++ b/pkg/abac/pdp/evalctx/evalctx_suite_test.go @@ -0,0 +1,13 @@ +package evalctx_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestEvalctx(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Evalctx Suite") +} diff --git a/pkg/abac/pdp/evalctx/init.go b/pkg/abac/pdp/evalctx/init.go new file mode 100644 index 00000000..6a54d746 --- /dev/null +++ b/pkg/abac/pdp/evalctx/init.go @@ -0,0 +1,13 @@ +package evalctx + +import ( + "time" + + gocache "github.com/patrickmn/go-cache" +) + +var localTimeEnvsCache *gocache.Cache + +func init() { + localTimeEnvsCache = gocache.New(10*time.Second, 30*time.Second) +} diff --git a/pkg/abac/pdp/evaluation/evaluation.go b/pkg/abac/pdp/evaluation/evaluation.go index 5a30dd4e..6e492d91 100644 --- a/pkg/abac/pdp/evaluation/evaluation.go +++ b/pkg/abac/pdp/evaluation/evaluation.go @@ -13,26 +13,27 @@ package evaluation import ( "fmt" "strings" + "time" log "github.com/sirupsen/logrus" "iam/pkg/abac/pdp/condition" "iam/pkg/abac/pdp/condition/operator" - pdptypes "iam/pkg/abac/pdp/types" + "iam/pkg/abac/pdp/evalctx" "iam/pkg/abac/types" "iam/pkg/cacheimpls" ) -/* -求值逻辑, 包括: - -对Policy的condition求值 -*/ +// NOTE: 目前所有的 query/eval都在这个文件中, 两个主要入口: +// - eval: EvalPolicies +// - query: PartialEvalPolicies // EvalPolicies 计算是否满足 -func EvalPolicies(ctx *pdptypes.EvalContext, policies []types.AuthPolicy) (isPass bool, policyID int64, err error) { +func EvalPolicies(ctx *evalctx.EvalContext, policies []types.AuthPolicy) (isPass bool, policyID int64, err error) { + currentTime := time.Now() + for _, policy := range policies { - isPass, err = evalPolicy(ctx, policy) + isPass, err = evalPolicy(ctx, policy, currentTime) if err != nil { log.Debugf("pdp evalPolicy: ctx=`%+v`, policy=`%+v`, error=`%s`", ctx, policy, err) } @@ -47,7 +48,7 @@ func EvalPolicies(ctx *pdptypes.EvalContext, policies []types.AuthPolicy) (isPas } // evalPolicy 计算单个policy是否满足 -func evalPolicy(ctx *pdptypes.EvalContext, policy types.AuthPolicy) (bool, error) { +func evalPolicy(ctx *evalctx.EvalContext, policy types.AuthPolicy, currentTime time.Time) (bool, error) { // action 不关联资源类型时, 直接返回true if ctx.Action.WithoutResourceType() { log.Debugf("pdp evalPolicy WithoutResourceType action: %s %s", ctx.System, ctx.Action.ID) @@ -67,20 +68,29 @@ func evalPolicy(ctx *pdptypes.EvalContext, policy types.AuthPolicy) (bool, error return false, err } + err = ctx.InitEnvironments(cond, currentTime) + if err != nil { + log.Errorf("pdp evalPolicy polidy id:%d expression: %s, currentTime: %s, error:%v", + policy.ID, policy.Expression, currentTime, err) + return false, err + } + isPass := cond.Eval(ctx) return isPass, err } // PartialEvalPolicies 筛选check pass的policies func PartialEvalPolicies( - ctx *pdptypes.EvalContext, + ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) ([]condition.Condition, []int64, error) { + currentTime := time.Now() + remainedConditions := make([]condition.Condition, 0, len(policies)) passedPolicyIDs := make([]int64, 0, len(policies)) for _, policy := range policies { - isPass, condition, err := partialEvalPolicy(ctx, policy) + isPass, condition, err := partialEvalPolicy(ctx, policy, currentTime) if err != nil { // TODO: 一条报错怎么处理????? 
log.Debugf("pdp PartialEvalPoliciesy policy: %+v ctx: %+v error: %s", policy, ctx, err) @@ -98,7 +108,11 @@ func PartialEvalPolicies( return remainedConditions, passedPolicyIDs, nil } -func partialEvalPolicy(ctx *pdptypes.EvalContext, policy types.AuthPolicy) (bool, condition.Condition, error) { +func partialEvalPolicy( + ctx *evalctx.EvalContext, + policy types.AuthPolicy, + currentTime time.Time, +) (bool, condition.Condition, error) { // action 不关联资源类型时, 直接返回true if ctx.Action.WithoutResourceType() { log.Debugf("pdp evalPolicy WithoutResourceType action: %s %s", ctx.System, ctx.Action.ID) @@ -112,8 +126,16 @@ func partialEvalPolicy(ctx *pdptypes.EvalContext, policy types.AuthPolicy) (bool return false, nil, err } + err = ctx.InitEnvironments(cond, currentTime) + if err != nil { + log.Errorf( + "pdp evalPolicy polidy id:%d expression: %s, currentTime: %s, error:%v", + policy.ID, policy.Expression, currentTime, err) + return false, nil, err + } + // if no resource passed - if !ctx.HasResources() { + if !(ctx.HasResources() || ctx.HasEnv()) { return true, cond, nil } diff --git a/pkg/abac/pdp/evaluation/evaluation_suite_test.go b/pkg/abac/pdp/evaluation/evaluation_suite_test.go index 7bcdb5c1..010db33e 100644 --- a/pkg/abac/pdp/evaluation/evaluation_suite_test.go +++ b/pkg/abac/pdp/evaluation/evaluation_suite_test.go @@ -13,7 +13,7 @@ package evaluation_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/pdp/evaluation/evaluation_test.go b/pkg/abac/pdp/evaluation/evaluation_test.go index dbc0a5a4..b334681e 100644 --- a/pkg/abac/pdp/evaluation/evaluation_test.go +++ b/pkg/abac/pdp/evaluation/evaluation_test.go @@ -11,12 +11,14 @@ package evaluation import ( + "time" + "github.com/TencentBlueKing/gopkg/cache/memory" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/condition" - pdptypes "iam/pkg/abac/pdp/types" + "iam/pkg/abac/pdp/evalctx" "iam/pkg/abac/types" "iam/pkg/abac/types/request" "iam/pkg/cacheimpls" @@ -24,7 +26,7 @@ import ( var _ = Describe("Evaluation", func() { - var c *pdptypes.EvalContext + var c *evalctx.EvalContext var policy types.AuthPolicy willPassPolicy := types.AuthPolicy{ ID: 1, @@ -122,7 +124,7 @@ var _ = Describe("Evaluation", func() { Type: "job", }, }) - c = pdptypes.NewEvalContext(request) + c = evalctx.NewEvalContext(request) policy = types.AuthPolicy{ Expression: "", } @@ -199,14 +201,14 @@ var _ = Describe("Evaluation", func() { Describe("evalPolicy", func() { It("ctx.Action.WithoutResourceType", func() { c.Action.FillAttributes(1, []types.ActionResourceType{}) - allowed, err := evalPolicy(c, policy) + allowed, err := evalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.True(GinkgoT(), allowed) }) It("has no resources", func() { c.Resources = []types.Resource{} - allowed, err := evalPolicy(c, policy) + allowed, err := evalPolicy(c, policy, time.Now()) assert.False(GinkgoT(), allowed) assert.Contains(GinkgoT(), err.Error(), "get not resource in request") @@ -216,7 +218,7 @@ var _ = Describe("Evaluation", func() { policy = types.AuthPolicy{ Expression: "123", } - allowed, err := evalPolicy(c, policy) + allowed, err := evalPolicy(c, policy, time.Now()) assert.Error(GinkgoT(), err) assert.False(GinkgoT(), allowed) }) @@ -248,7 +250,7 @@ var _ = Describe("Evaluation", func() { ExpressionSignature: "33268b97074629d05fda196e2f7e59d2", } - allowed, err := evalPolicy(c, policy) + allowed, err := evalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.True(GinkgoT(), allowed) }) @@ -280,7 +282,7 @@ var _ = Describe("Evaluation", func() { ExpressionSignature: "cfeeb810bf45de623f8007d25d25293a", } - allowed, err := evalPolicy(c, policy) + allowed, err := evalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.False(GinkgoT(), allowed) }) @@ -350,7 +352,7 @@ var _ = Describe("Evaluation", func() { Describe("partialEvalPolicy", func() { It("ctx.Action.WithoutResourceType", func() { c.Action.FillAttributes(1, []types.ActionResourceType{}) - allowed, cond, err := partialEvalPolicy(c, policy) + allowed, cond, err := partialEvalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.True(GinkgoT(), allowed) assert.Equal(GinkgoT(), condition.NewAnyCondition(), cond) @@ -358,14 +360,14 @@ var _ = Describe("Evaluation", func() { It("has no resources in ctx", func() { c.Resources = []types.Resource{} - allowed, cond, err := partialEvalPolicy(c, policy) + allowed, cond, err := partialEvalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.True(GinkgoT(), allowed) assert.Equal(GinkgoT(), condition.NewAnyCondition(), cond) }) It("ok, any", func() { - allowed, cond, err := partialEvalPolicy(c, policy) + allowed, cond, err := partialEvalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.True(GinkgoT(), allowed) assert.Equal(GinkgoT(), condition.NewAnyCondition(), cond) @@ -390,7 +392,7 @@ var _ = Describe("Evaluation", func() { ExpressionSignature: "7c1af23ce3f3664789c5d698f8c3f0d5", } - allowed, cond, err := partialEvalPolicy(c, policy) + allowed, cond, err := partialEvalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.True(GinkgoT(), allowed) assert.Equal(GinkgoT(), condition.NewAnyCondition(), cond) @@ -413,7 +415,7 @@ var _ = 
Describe("Evaluation", func() { ExpressionSignature: "7c1af23ce3f3664789c5d698f8c3f0d5", } - allowed, cond, err := partialEvalPolicy(c, policy) + allowed, cond, err := partialEvalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.False(GinkgoT(), allowed) assert.Nil(GinkgoT(), cond) @@ -436,7 +438,7 @@ var _ = Describe("Evaluation", func() { ExpressionSignature: "609d10bfe269ee71bb708209696572f9", } - allowed, cond, err := partialEvalPolicy(c, policy) + allowed, cond, err := partialEvalPolicy(c, policy, time.Now()) assert.NoError(GinkgoT(), err) assert.True(GinkgoT(), allowed) assert.NotNil(GinkgoT(), cond) diff --git a/pkg/abac/pdp/helper.go b/pkg/abac/pdp/helper.go index 8c4f5370..5e5ae6cb 100644 --- a/pkg/abac/pdp/helper.go +++ b/pkg/abac/pdp/helper.go @@ -13,12 +13,13 @@ package pdp import ( "database/sql" "errors" + "time" "github.com/TencentBlueKing/gopkg/errorx" "iam/pkg/abac/pdp/condition" + "iam/pkg/abac/pdp/evalctx" "iam/pkg/abac/pdp/evaluation" - pdptypes "iam/pkg/abac/pdp/types" "iam/pkg/abac/pip" "iam/pkg/abac/prp" "iam/pkg/abac/types" @@ -156,7 +157,11 @@ func queryAndPartialEvalConditions( } // 执行完后, 只返回 执行后的残留的 conditions - conditions, passedPoliciesIDs, err := evaluation.PartialEvalPolicies(pdptypes.NewEvalContext(r), policies) + if entry != nil { + envs, _ := evalctx.GenTimeEnvsFromCache(DefaultTz, time.Now()) + debug.WithValue(entry, "env", envs) + } + conditions, passedPoliciesIDs, err := evaluation.PartialEvalPolicies(evalctx.NewEvalContext(r), policies) if len(conditions) == 0 { debug.WithNoPassEvalPolicies(entry, policies) } diff --git a/pkg/abac/pdp/helper_test.go b/pkg/abac/pdp/helper_test.go index 116354fe..ca27b8f1 100644 --- a/pkg/abac/pdp/helper_test.go +++ b/pkg/abac/pdp/helper_test.go @@ -14,14 +14,14 @@ import ( "errors" "reflect" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/condition" + "iam/pkg/abac/pdp/evalctx" "iam/pkg/abac/pdp/evaluation" - types2 "iam/pkg/abac/pdp/types" "iam/pkg/abac/pip" "iam/pkg/abac/prp" "iam/pkg/abac/prp/mock" @@ -112,10 +112,6 @@ var _ = Describe("Helper", func() { } patches = gomonkey.NewPatches() - patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionRemoteResource", - func(_ *request.Request) bool { - return true - }) }) AfterEach(func() { @@ -152,6 +148,10 @@ var _ = Describe("Helper", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionRemoteResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return errors.New("fill subject fail") }) @@ -166,6 +166,10 @@ var _ = Describe("Helper", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionRemoteResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -187,6 +191,10 @@ var _ = Describe("Helper", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionRemoteResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -198,7 +206,7 @@ var _ = Describe("Helper", func() { ) (policies []types.AuthPolicy, err error) { return []types.AuthPolicy{{}}, nil }) - patches.ApplyFunc(evaluation.PartialEvalPolicies, func(ctx *types2.EvalContext, + patches.ApplyFunc(evaluation.PartialEvalPolicies, func(ctx *evalctx.EvalContext, policie []types.AuthPolicy, ) ([]condition.Condition, []int64, error) { return nil, nil, errors.New("filter error") @@ -214,6 +222,10 @@ var _ = Describe("Helper", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionRemoteResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -225,7 +237,7 @@ var _ = Describe("Helper", func() { ) (policies []types.AuthPolicy, err error) { return []types.AuthPolicy{{}}, nil }) - patches.ApplyFunc(evaluation.PartialEvalPolicies, func(ctx *types2.EvalContext, + patches.ApplyFunc(evaluation.PartialEvalPolicies, func(ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) ([]condition.Condition, []int64, error) { return []condition.Condition{}, []int64{}, nil @@ -240,6 +252,10 @@ var _ = Describe("Helper", func() { patches.ApplyFunc(fillActionDetail, func(req *request.Request) error { return nil }) + patches.ApplyMethod(reflect.TypeOf(req), "ValidateActionRemoteResource", + func(_ *request.Request) bool { + return true + }) patches.ApplyFunc(fillSubjectDetail, func(req *request.Request) error { return nil }) @@ -251,7 +267,7 @@ var _ = Describe("Helper", func() { ) (policies []types.AuthPolicy, err error) { return []types.AuthPolicy{{}}, nil }) - patches.ApplyFunc(evaluation.PartialEvalPolicies, func(ctx *types2.EvalContext, + patches.ApplyFunc(evaluation.PartialEvalPolicies, func(ctx *evalctx.EvalContext, policies []types.AuthPolicy, ) ([]condition.Condition, []int64, error) { return []condition.Condition{ diff 
--git a/pkg/abac/pdp/pdp_suite_test.go b/pkg/abac/pdp/pdp_suite_test.go index 42d19a6e..73cd364d 100644 --- a/pkg/abac/pdp/pdp_suite_test.go +++ b/pkg/abac/pdp/pdp_suite_test.go @@ -13,7 +13,7 @@ package pdp_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/pdp/remote_test.go b/pkg/abac/pdp/remote_test.go index 823ec694..596d1954 100644 --- a/pkg/abac/pdp/remote_test.go +++ b/pkg/abac/pdp/remote_test.go @@ -18,9 +18,9 @@ import ( "iam/pkg/abac/pdp/condition" "iam/pkg/abac/pip" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/types" diff --git a/pkg/abac/pdp/translate/translate_suite_test.go b/pkg/abac/pdp/translate/translate_suite_test.go index ae9260f3..32329a13 100644 --- a/pkg/abac/pdp/translate/translate_suite_test.go +++ b/pkg/abac/pdp/translate/translate_suite_test.go @@ -13,7 +13,7 @@ package translate_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/pdp/translate/translate_test.go b/pkg/abac/pdp/translate/translate_test.go index bc4783dc..6f31b902 100644 --- a/pkg/abac/pdp/translate/translate_test.go +++ b/pkg/abac/pdp/translate/translate_test.go @@ -13,7 +13,7 @@ package translate import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/condition" @@ -74,8 +74,8 @@ var _ = Describe("Expression", func() { assert.NoError(GinkgoT(), err) assert.Equal(GinkgoT(), "OR", expr["op"]) assert.ElementsMatch(GinkgoT(), want["content"], expr["content"]) - //assert.Equal(GinkgoT(), want, expr) - //assert.True(GinkgoT(), assert.ObjectsAreEqualValues(want, expr)) + // assert.Equal(GinkgoT(), want, expr) + // assert.True(GinkgoT(), assert.ObjectsAreEqualValues(want, expr)) }) diff --git a/pkg/abac/pdp/types/context_test.go b/pkg/abac/pdp/types/context_test.go deleted file mode 100644 index 90161a98..00000000 --- a/pkg/abac/pdp/types/context_test.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available. - * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. - * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at http://opensource.org/licenses/MIT - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package types - -import ( - . 
"github.com/onsi/ginkgo" - "github.com/stretchr/testify/assert" - - "iam/pkg/abac/types" - "iam/pkg/abac/types/request" -) - -var _ = Describe("Context", func() { - - var req *request.Request - var c *EvalContext - BeforeEach(func() { - req = &request.Request{ - System: "iam", - Subject: types.Subject{ - Type: "user", - ID: "admin", - }, - Action: types.Action{ - ID: "execute_job", - }, - Resources: []types.Resource{ - { - - System: "iam", - Type: "job", - ID: "job1", - Attribute: map[string]interface{}{"key": "value1"}, - }, - }, - } - c = NewEvalContext(req) - }) - - Describe("NewEvalContext", func() { - It("no resources", func() { - req := &request.Request{} - ec := NewEvalContext(req) - assert.NotNil(GinkgoT(), ec) - }) - - It("ok, has resource", func() { - ec := NewEvalContext(req) - assert.NotNil(GinkgoT(), ec) - }) - - It("ok, has resource, attribute nil", func() { - req := &request.Request{ - Resources: []types.Resource{ - { - ID: "test", - Attribute: nil, - }, - }, - } - ec := NewEvalContext(req) - assert.NotNil(GinkgoT(), ec) - }) - - }) - - Describe("GetAttr", func() { - It("ok", func() { - a, err := c.GetAttr("iam.job.id") - assert.NoError(GinkgoT(), err) - assert.Equal(GinkgoT(), "job1", a) - }) - - It("miss", func() { - a, err := c.GetAttr("bk_cmdb.job.id") - assert.NoError(GinkgoT(), err) - assert.Nil(GinkgoT(), a) - }) - }) - - Describe("HasResource", func() { - It("ok", func() { - assert.True(GinkgoT(), c.HasResource("iam.job")) - }) - - It("miss", func() { - assert.False(GinkgoT(), c.HasResource("bk_cmdb.job")) - - }) - - }) - -}) diff --git a/pkg/abac/pdp/types/object_test.go b/pkg/abac/pdp/types/object_test.go index 17552d68..ed002413 100644 --- a/pkg/abac/pdp/types/object_test.go +++ b/pkg/abac/pdp/types/object_test.go @@ -13,7 +13,7 @@ package types import ( "strings" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/pdp/types/policy_condition_test.go b/pkg/abac/pdp/types/policy_condition_test.go index 80922dbd..3c0fb797 100644 --- a/pkg/abac/pdp/types/policy_condition_test.go +++ b/pkg/abac/pdp/types/policy_condition_test.go @@ -1,7 +1,7 @@ package types_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pdp/types" diff --git a/pkg/abac/pdp/types/types_suite_test.go b/pkg/abac/pdp/types/types_suite_test.go index 7f0a208b..7a5c9cfb 100644 --- a/pkg/abac/pdp/types/types_suite_test.go +++ b/pkg/abac/pdp/types/types_suite_test.go @@ -13,7 +13,7 @@ package types_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/pdp/types/util_test.go b/pkg/abac/pdp/types/util_test.go index 360bbf35..b3870f6c 100644 --- a/pkg/abac/pdp/types/util_test.go +++ b/pkg/abac/pdp/types/util_test.go @@ -11,7 +11,7 @@ package types import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/pip/action_test.go b/pkg/abac/pip/action_test.go index 127df76f..3d3097e0 100644 --- a/pkg/abac/pip/action_test.go +++ b/pkg/abac/pip/action_test.go @@ -13,9 +13,9 @@ package pip_test import ( "errors" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pip" diff --git a/pkg/abac/pip/pip_suite_test.go b/pkg/abac/pip/pip_suite_test.go index 93151133..c08f37e1 100644 --- a/pkg/abac/pip/pip_suite_test.go +++ b/pkg/abac/pip/pip_suite_test.go @@ -13,7 +13,7 @@ package pip_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/pip/resource_test.go b/pkg/abac/pip/resource_test.go index 6bf5a7e9..28b274ad 100644 --- a/pkg/abac/pip/resource_test.go +++ b/pkg/abac/pip/resource_test.go @@ -13,9 +13,9 @@ package pip_test import ( "errors" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pip" diff --git a/pkg/abac/pip/subject_test.go b/pkg/abac/pip/subject_test.go index aa662c91..2ada3f86 100644 --- a/pkg/abac/pip/subject_test.go +++ b/pkg/abac/pip/subject_test.go @@ -13,9 +13,9 @@ package pip_test import ( "errors" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/pip" diff --git a/pkg/abac/prp/common/changelist_test.go b/pkg/abac/prp/common/changelist_test.go index 1e2e7d25..21c4838d 100644 --- a/pkg/abac/prp/common/changelist_test.go +++ b/pkg/abac/prp/common/changelist_test.go @@ -15,9 +15,9 @@ import ( "reflect" "time" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" rds "github.com/go-redis/redis/v8" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/prp/common" diff --git a/pkg/abac/prp/common/common_suite_test.go b/pkg/abac/prp/common/common_suite_test.go index a7e3d8fb..44fc00ed 100644 --- a/pkg/abac/prp/common/common_suite_test.go +++ b/pkg/abac/prp/common/common_suite_test.go @@ -13,7 +13,7 @@ package common_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/prp/expression/database_test.go b/pkg/abac/prp/expression/database_test.go index a20b3fdc..1dba04f7 100644 --- a/pkg/abac/prp/expression/database_test.go +++ b/pkg/abac/prp/expression/database_test.go @@ -13,36 +13,37 @@ package expression import ( "errors" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/service" "iam/pkg/service/mock" - "iam/pkg/service/types" ) var _ = Describe("Database", func() { Describe("newDatabaseRetriever", func() { - var ctl *gomock.Controller - var patches *gomonkey.Patches + It("ok", func() { + var ctl *gomock.Controller + var patches *gomonkey.Patches - ctl = gomock.NewController(GinkgoT()) - patches = gomonkey.NewPatches() + ctl = gomock.NewController(GinkgoT()) + patches = gomonkey.NewPatches() - mockPolicyService := mock.NewMockPolicyService(ctl) - patches.ApplyFunc(service.NewPolicyService, func() service.PolicyService { - return mockPolicyService - }) + mockPolicyService := mock.NewMockPolicyService(ctl) + patches.ApplyFunc(service.NewPolicyService, func() service.PolicyService { + return mockPolicyService + }) - r := newDatabaseRetriever() - assert.NotNil(GinkgoT(), r) + r := newDatabaseRetriever() + assert.NotNil(GinkgoT(), r) - ctl.Finish() - patches.Reset() + ctl.Finish() + patches.Reset() + }) }) Describe("retrieve", func() { diff --git a/pkg/abac/prp/expression/expression_suite_test.go b/pkg/abac/prp/expression/expression_suite_test.go index bebbc204..1fca7aa9 100644 --- a/pkg/abac/prp/expression/expression_suite_test.go +++ b/pkg/abac/prp/expression/expression_suite_test.go @@ -13,7 +13,7 @@ package expression_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/prp/expression/init_test.go b/pkg/abac/prp/expression/init_test.go index 87f20eb8..b71e8cbb 100644 --- a/pkg/abac/prp/expression/init_test.go +++ b/pkg/abac/prp/expression/init_test.go @@ -13,9 +13,9 @@ package expression_test import ( "time" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" gocache "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" diff --git a/pkg/abac/prp/expression/memory_test.go b/pkg/abac/prp/expression/memory_test.go index 4b796059..696fe883 100644 --- a/pkg/abac/prp/expression/memory_test.go +++ b/pkg/abac/prp/expression/memory_test.go @@ -16,9 +16,9 @@ import ( "time" "github.com/TencentBlueKing/gopkg/cache" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" rds "github.com/go-redis/redis/v8" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" gocache "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" diff --git a/pkg/abac/prp/expression/redis_test.go b/pkg/abac/prp/expression/redis_test.go index 4416c1f8..8f585e0a 100644 --- a/pkg/abac/prp/expression/redis_test.go +++ b/pkg/abac/prp/expression/redis_test.go @@ -16,9 +16,9 @@ import ( "time" "github.com/TencentBlueKing/gopkg/cache" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/cache/redis" diff --git a/pkg/abac/prp/helper_test.go b/pkg/abac/prp/helper_test.go index 6080bbc7..2792f493 100644 --- a/pkg/abac/prp/helper_test.go +++ b/pkg/abac/prp/helper_test.go @@ -14,9 +14,9 @@ import ( "errors" "time" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/types" diff --git a/pkg/abac/prp/policy/database_test.go b/pkg/abac/prp/policy/database_test.go index f7d13373..5d1d2de3 100644 --- a/pkg/abac/prp/policy/database_test.go +++ b/pkg/abac/prp/policy/database_test.go @@ -13,9 +13,9 @@ package policy import ( "errors" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/service" diff --git a/pkg/abac/prp/policy/init_test.go b/pkg/abac/prp/policy/init_test.go index 687dffdc..d9489c27 100644 --- a/pkg/abac/prp/policy/init_test.go +++ b/pkg/abac/prp/policy/init_test.go @@ -13,9 +13,9 @@ package policy_test import ( "time" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" gocache "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" diff --git a/pkg/abac/prp/policy/memory_test.go b/pkg/abac/prp/policy/memory_test.go index c424d7ef..ed1066f4 100644 --- a/pkg/abac/prp/policy/memory_test.go +++ b/pkg/abac/prp/policy/memory_test.go @@ -16,10 +16,10 @@ import ( "time" "github.com/TencentBlueKing/gopkg/cache" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" rds "github.com/go-redis/redis/v8" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" gocache "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" diff --git a/pkg/abac/prp/policy/policy_suite_test.go b/pkg/abac/prp/policy/policy_suite_test.go index 0eb69da2..09d3ba4b 100644 --- a/pkg/abac/prp/policy/policy_suite_test.go +++ b/pkg/abac/prp/policy/policy_suite_test.go @@ -13,7 +13,7 @@ package policy_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/prp/policy/redis.go b/pkg/abac/prp/policy/redis.go index d0b6b2f2..fadda4fd 100644 --- a/pkg/abac/prp/policy/redis.go +++ b/pkg/abac/prp/policy/redis.go @@ -271,6 +271,10 @@ func (r *redisRetriever) batchSet(subjectPKPolicies map[int64][]types.AuthPolicy } func (r *redisRetriever) batchDelete(subjectPKs []int64) error { + if len(subjectPKs) == 0 { + return nil + } + keys := make([]cache.Key, 0, len(subjectPKs)) for _, subjectPK := range subjectPKs { keys = append(keys, r.genKey(subjectPK)) @@ -299,6 +303,10 @@ func (r *redisRetriever) batchDelete(subjectPKs []int64) error { } func deleteSystemSubjectPKsFromRedis(system string, subjectPKs []int64) error { + if len(subjectPKs) == 0 { + return nil + } + c := newRedisRetriever(system, -1, nil) return c.batchDelete(subjectPKs) } diff --git a/pkg/abac/prp/policy/redis_test.go b/pkg/abac/prp/policy/redis_test.go index 35abf54b..479aca38 100644 --- a/pkg/abac/prp/policy/redis_test.go +++ b/pkg/abac/prp/policy/redis_test.go @@ -16,8 +16,8 @@ import ( "time" "github.com/TencentBlueKing/gopkg/cache" - "github.com/agiledragon/gomonkey" - . "github.com/onsi/ginkgo" + "github.com/agiledragon/gomonkey/v2" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/cache/redis" diff --git a/pkg/abac/prp/policy_curd.go b/pkg/abac/prp/policy_curd.go index 71006287..e0f327c4 100644 --- a/pkg/abac/prp/policy_curd.go +++ b/pkg/abac/prp/policy_curd.go @@ -11,6 +11,7 @@ package prp import ( + "database/sql" "errors" "github.com/TencentBlueKing/gopkg/collection/set" @@ -359,6 +360,11 @@ func (m *policyManager) DeleteByActionID(systemID, actionID string) error { // 1. 查询 action pk actionPK, err := m.actionService.GetActionPK(systemID, actionID) if err != nil { + // if action already deleted, just return + if errors.Is(err, sql.ErrNoRows) { + return nil + } + err = errorWrapf(err, "actionService.GetActionPK systemID=`%s`, actionID=`%s` fail", systemID, actionID) return err } diff --git a/pkg/abac/prp/policy_curd_test.go b/pkg/abac/prp/policy_curd_test.go index 1619cb02..49099eea 100644 --- a/pkg/abac/prp/policy_curd_test.go +++ b/pkg/abac/prp/policy_curd_test.go @@ -18,9 +18,9 @@ import ( "iam/pkg/service/mock" svctypes "iam/pkg/service/types" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/prp/policy_list_saas_test.go b/pkg/abac/prp/policy_list_saas_test.go index 820d522e..99076cf5 100644 --- a/pkg/abac/prp/policy_list_saas_test.go +++ b/pkg/abac/prp/policy_list_saas_test.go @@ -11,7 +11,7 @@ package prp_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("PolicyListSaas", func() { diff --git a/pkg/abac/prp/policy_list_test.go b/pkg/abac/prp/policy_list_test.go index 4a7bd0d2..08ffd0b4 100644 --- a/pkg/abac/prp/policy_list_test.go +++ b/pkg/abac/prp/policy_list_test.go @@ -11,7 +11,7 @@ package prp_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("PolicyList", func() { diff --git a/pkg/abac/prp/policy_test.go b/pkg/abac/prp/policy_test.go index a08986c9..0a030538 100644 --- a/pkg/abac/prp/policy_test.go +++ b/pkg/abac/prp/policy_test.go @@ -11,7 +11,7 @@ package prp_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("Policy", func() { diff --git a/pkg/abac/prp/prp_suite_test.go b/pkg/abac/prp/prp_suite_test.go index 112d42cb..2d1e2d2b 100644 --- a/pkg/abac/prp/prp_suite_test.go +++ b/pkg/abac/prp/prp_suite_test.go @@ -13,7 +13,7 @@ package prp_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/types/action_test.go b/pkg/abac/types/action_test.go index ec402d50..3120556c 100644 --- a/pkg/abac/types/action_test.go +++ b/pkg/abac/types/action_test.go @@ -13,7 +13,7 @@ package types_test import ( "iam/pkg/abac/types" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/types/attribute_test.go b/pkg/abac/types/attribute_test.go index 0ce50015..ed8c2aab 100644 --- a/pkg/abac/types/attribute_test.go +++ b/pkg/abac/types/attribute_test.go @@ -13,7 +13,7 @@ package types_test import ( "iam/pkg/abac/types" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/abac/types/constant.go b/pkg/abac/types/constant.go index 9cb791b6..ed256838 100644 --- a/pkg/abac/types/constant.go +++ b/pkg/abac/types/constant.go @@ -17,4 +17,11 @@ const ( PKAttrName = "pk" GroupAttrName = "group" DeptAttrName = "department" + + IamPath = "_bk_iam_path_" + IamPathSuffix = "." + IamPath + + IamEnv = "_bk_iam_env_" + IamEnvSuffix = "." + IamEnv + IamEnvTzSuffix = IamEnvSuffix + ".tz" ) diff --git a/pkg/abac/types/request/request_suite_test.go b/pkg/abac/types/request/request_suite_test.go index 45c17c1b..8a751749 100644 --- a/pkg/abac/types/request/request_suite_test.go +++ b/pkg/abac/types/request/request_suite_test.go @@ -13,7 +13,7 @@ package request_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/abac/types/request/request_test.go b/pkg/abac/types/request/request_test.go index c73c7445..2b274fa2 100644 --- a/pkg/abac/types/request/request_test.go +++ b/pkg/abac/types/request/request_test.go @@ -11,7 +11,7 @@ package request_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/types" diff --git a/pkg/abac/types/subject_test.go b/pkg/abac/types/subject_test.go index 2c875b0d..1fdd71f5 100644 --- a/pkg/abac/types/subject_test.go +++ b/pkg/abac/types/subject_test.go @@ -13,7 +13,7 @@ package types_test import ( "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/types" diff --git a/pkg/abac/types/types_suite_test.go b/pkg/abac/types/types_suite_test.go index 7f0a208b..7a5c9cfb 100644 --- a/pkg/abac/types/types_suite_test.go +++ b/pkg/abac/types/types_suite_test.go @@ -13,7 +13,7 @@ package types_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/api/common/common_suite_test.go b/pkg/api/common/common_suite_test.go index a7e3d8fb..44fc00ed 100644 --- a/pkg/api/common/common_suite_test.go +++ b/pkg/api/common/common_suite_test.go @@ -13,7 +13,7 @@ package common_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/api/common/init_test.go b/pkg/api/common/init_test.go index 6aab6013..0f8fef45 100644 --- a/pkg/api/common/init_test.go +++ b/pkg/api/common/init_test.go @@ -11,7 +11,7 @@ package common import ( - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/config" diff --git a/pkg/api/model/handler/action.go b/pkg/api/model/handler/action.go index 305f55aa..a0d3f3ca 100644 --- a/pkg/api/model/handler/action.go +++ b/pkg/api/model/handler/action.go @@ -90,6 +90,7 @@ func BatchCreateActions(c *gin.Context) { RelatedActions: ac.RelatedActions, } action.RelatedResourceTypes = convertToRelatedResourceTypes(ac.RelatedResourceTypes) + action.RelatedEnvironments = convertToRelatedEnvironments(ac.RelatedEnvironments) actions = append(actions, action) } @@ -181,6 +182,9 @@ func UpdateAction(c *gin.Context) { if _, ok := data["related_actions"]; ok { allowEmptyFields.AddKey("RelatedActions") } + if _, ok := data["related_environments"]; ok { + allowEmptyFields.AddKey("RelatedEnvironments") + } if _, ok := data["description"]; ok { allowEmptyFields.AddKey("Description") } @@ -197,6 +201,7 @@ func UpdateAction(c *gin.Context) { Type: body.Type, RelatedResourceTypes: convertToRelatedResourceTypes(body.RelatedResourceTypes), RelatedActions: body.RelatedActions, + RelatedEnvironments: convertToRelatedEnvironments(body.RelatedEnvironments), AllowEmptyFields: allowEmptyFields, } diff --git a/pkg/api/model/handler/action_check_test.go b/pkg/api/model/handler/action_check_test.go index 9f7e4937..bbf1bc5b 100644 --- a/pkg/api/model/handler/action_check_test.go +++ b/pkg/api/model/handler/action_check_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("ActionCheck", func() { diff --git a/pkg/api/model/handler/action_slz.go b/pkg/api/model/handler/action_slz.go index 669c9180..f39bdf00 100644 --- a/pkg/api/model/handler/action_slz.go +++ b/pkg/api/model/handler/action_slz.go @@ -33,6 +33,16 @@ type relatedResourceType struct { RelatedInstanceSelections []referenceInstanceSelection `json:"related_instance_selections" binding:"omitempty"` } +// relatedEnvironment, currently only support `current_timestamp`. +// if we support more types, should add a `validate` method, each type has different operators. +type relatedEnvironment struct { + // NOTE: currently only support period_daily, will support current_timestamp later + // and no operators now! + // only one field, but should be a struct! 
keep extensible in the future + Type string `json:"type" binding:"oneof=period_daily" example:"period_daily"` + // Operators []string `json:"operators" binding:"omitempty,unique"` +} + type actionSerializer struct { ID string `json:"id" binding:"required,max=32" example:"biz_create"` Name string `json:"name" binding:"required" example:"biz_create"` @@ -45,6 +55,7 @@ type actionSerializer struct { RelatedResourceTypes []relatedResourceType `json:"related_resource_types"` RelatedActions []string `json:"related_actions"` + RelatedEnvironments []relatedEnvironment `json:"related_environments" binding:"omitempty"` Version int64 `json:"version" binding:"omitempty,gte=1" example:"1"` } @@ -59,7 +70,9 @@ type actionUpdateSerializer struct { RelatedResourceTypes []relatedResourceType `json:"related_resource_types"` RelatedActions []string `json:"related_actions"` - Version int64 `json:"version" binding:"omitempty,gte=1" example:"1"` + RelatedEnvironments []relatedEnvironment `json:"related_environments" binding:"omitempty"` + + Version int64 `json:"version" binding:"omitempty,gte=1" example:"1"` } func (a *actionUpdateSerializer) validate(keys map[string]interface{}) (bool, string) { @@ -96,6 +109,13 @@ func (a *actionUpdateSerializer) validate(keys map[string]interface{}) (bool, st } } + if len(a.RelatedEnvironments) > 0 { + valid, message := validateRelatedEnvironments(a.RelatedEnvironments, "") + if !valid { + return false, message + } + } + return true, "valid" } @@ -111,33 +131,54 @@ func validateRelatedInstanceSelections(data []referenceInstanceSelection, action return true, "valid" } +func validateRelatedEnvironments(data []relatedEnvironment, actionID string) (bool, string) { + typeID := set.NewStringSet() + for index, d := range data { + if err := binding.Validator.ValidateStruct(d); err != nil { + message := fmt.Sprintf("data of action_id=%s related_environments[%d], %s", + actionID, index, util.ValidationErrorMessage(err)) + return false, message + } + + // 校验 data.ID 没有重复 + if typeID.Has(d.Type) { + message := fmt.Sprintf("data of action_id=%s related_environments[%d] id should not repeat", + actionID, index) + return false, message + } + + typeID.Add(d.Type) + } + return true, "valid" +} + func validateRelatedResourceTypes(data []relatedResourceType, actionID string) (bool, string) { resourceTypeID := set.NewStringSet() - for index, data := range data { - if err := binding.Validator.ValidateStruct(data); err != nil { + for index, d := range data { + if err := binding.Validator.ValidateStruct(d); err != nil { message := fmt.Sprintf("data of action_id=%s related_resource_types[%d], %s", actionID, index, util.ValidationErrorMessage(err)) return false, message } // 校验 data.ID 没有重复 - if resourceTypeID.Has(data.ID) { + if resourceTypeID.Has(d.ID) { message := fmt.Sprintf("data of action_id=%s related_resource_types[%d] id"+ " should not repeat", actionID, index) return false, message } - resourceTypeID.Add(data.ID) + resourceTypeID.Add(d.ID) - relatedResourceTypeID := fmt.Sprintf("system_id=%s,id=%s", data.SystemID, data.ID) + relatedResourceTypeID := fmt.Sprintf("system_id=%s,id=%s", d.SystemID, d.ID) // selection_mode = attribute的时候, related_instance_selections 可以为空, // 其他情况: instance OR all, 不能为空 - if data.SelectionMode != SelectionModeAttribute { - if len(data.RelatedInstanceSelections) > 0 { + if d.SelectionMode != SelectionModeAttribute { + if len(d.RelatedInstanceSelections) > 0 { // validate if not empty - valid, message := 
validateRelatedInstanceSelections(data.RelatedInstanceSelections, actionID, relatedResourceTypeID) + valid, message := validateRelatedInstanceSelections(d.RelatedInstanceSelections, actionID, relatedResourceTypeID) if !valid { return false, message } @@ -172,6 +213,13 @@ func validateAction(body []actionSerializer) (bool, string) { return false, message } } + + if len(data.RelatedEnvironments) > 0 { + valid, message := validateRelatedEnvironments(data.RelatedEnvironments, data.ID) + if !valid { + return false, message + } + } } return true, "valid" } diff --git a/pkg/api/model/handler/action_slz_test.go b/pkg/api/model/handler/action_slz_test.go index 594e9e58..90c309a2 100644 --- a/pkg/api/model/handler/action_slz_test.go +++ b/pkg/api/model/handler/action_slz_test.go @@ -11,7 +11,7 @@ package handler import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/api/model/handler/action_test.go b/pkg/api/model/handler/action_test.go index 50ed3a1b..abd00d9a 100644 --- a/pkg/api/model/handler/action_test.go +++ b/pkg/api/model/handler/action_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("Action", func() { diff --git a/pkg/api/model/handler/handler_suite_test.go b/pkg/api/model/handler/handler_suite_test.go index 410bb9f9..caf2c6d1 100644 --- a/pkg/api/model/handler/handler_suite_test.go +++ b/pkg/api/model/handler/handler_suite_test.go @@ -13,7 +13,7 @@ package handler_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/api/model/handler/instance_selection_check_test.go b/pkg/api/model/handler/instance_selection_check_test.go index f8e7b730..3d388ee6 100644 --- a/pkg/api/model/handler/instance_selection_check_test.go +++ b/pkg/api/model/handler/instance_selection_check_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("InstanceSelectionCheck", func() { diff --git a/pkg/api/model/handler/instance_selection_slz_test.go b/pkg/api/model/handler/instance_selection_slz_test.go index fedd710c..9a5ba52c 100644 --- a/pkg/api/model/handler/instance_selection_slz_test.go +++ b/pkg/api/model/handler/instance_selection_slz_test.go @@ -11,7 +11,7 @@ package handler import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/api/model/handler/instance_selection_test.go b/pkg/api/model/handler/instance_selection_test.go index c0039e1a..b8b477fe 100644 --- a/pkg/api/model/handler/instance_selection_test.go +++ b/pkg/api/model/handler/instance_selection_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("InstanceSelection", func() { diff --git a/pkg/api/model/handler/query_test.go b/pkg/api/model/handler/query_test.go index 8d7a1371..e376d49b 100644 --- a/pkg/api/model/handler/query_test.go +++ b/pkg/api/model/handler/query_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" ) var _ = Describe("Query", func() { diff --git a/pkg/api/model/handler/resource_type_check_test.go b/pkg/api/model/handler/resource_type_check_test.go index 299c8fb2..2a2c7c11 100644 --- a/pkg/api/model/handler/resource_type_check_test.go +++ b/pkg/api/model/handler/resource_type_check_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("ResourceTypeCheck", func() { diff --git a/pkg/api/model/handler/resource_type_slz_test.go b/pkg/api/model/handler/resource_type_slz_test.go index 7a7d9951..9465a811 100644 --- a/pkg/api/model/handler/resource_type_slz_test.go +++ b/pkg/api/model/handler/resource_type_slz_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("ResourceTypeSlz", func() { diff --git a/pkg/api/model/handler/resource_type_test.go b/pkg/api/model/handler/resource_type_test.go index a7af692c..8134010d 100644 --- a/pkg/api/model/handler/resource_type_test.go +++ b/pkg/api/model/handler/resource_type_test.go @@ -11,7 +11,7 @@ package handler import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/api/model/handler/system_check_test.go b/pkg/api/model/handler/system_check_test.go index 86a3ad5a..819ef5e8 100644 --- a/pkg/api/model/handler/system_check_test.go +++ b/pkg/api/model/handler/system_check_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("SystemCheck", func() { diff --git a/pkg/api/model/handler/system_config_check_test.go b/pkg/api/model/handler/system_config_check_test.go index 8a4bc3a6..e928e060 100644 --- a/pkg/api/model/handler/system_config_check_test.go +++ b/pkg/api/model/handler/system_config_check_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("SystemConfigCheck", func() { diff --git a/pkg/api/model/handler/system_config_slz_test.go b/pkg/api/model/handler/system_config_slz_test.go index d19457f3..116c6608 100644 --- a/pkg/api/model/handler/system_config_slz_test.go +++ b/pkg/api/model/handler/system_config_slz_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("SystemConfigSlz", func() { diff --git a/pkg/api/model/handler/system_config_test.go b/pkg/api/model/handler/system_config_test.go index 59db3e75..f9abc886 100644 --- a/pkg/api/model/handler/system_config_test.go +++ b/pkg/api/model/handler/system_config_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("SystemConfig", func() { diff --git a/pkg/api/model/handler/system_slz_test.go b/pkg/api/model/handler/system_slz_test.go index 6a891496..2cd12bb5 100644 --- a/pkg/api/model/handler/system_slz_test.go +++ b/pkg/api/model/handler/system_slz_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" ) var _ = Describe("SystemSlz", func() { diff --git a/pkg/api/model/handler/system_test.go b/pkg/api/model/handler/system_test.go index 0e6db01c..befcf1de 100644 --- a/pkg/api/model/handler/system_test.go +++ b/pkg/api/model/handler/system_test.go @@ -15,10 +15,10 @@ import ( "net/http" "testing" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/gin-gonic/gin" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/steinfletcher/apitest" "github.com/stretchr/testify/assert" @@ -88,9 +88,11 @@ func TestCreateSystem(t *testing.T) { }).BadRequestContainsMessage("bad request:invalid id") }) + cacheimpls.InitVerifyAppCodeAppSecret(false) + // init the router r := util.SetupRouter() - r.Use(middleware.ClientAuthMiddleware([]byte(""), false)) + r.Use(middleware.ClientAuthMiddleware([]byte(""))) url := "/api/v1/systems" r.POST(url, CreateSystem) @@ -99,10 +101,7 @@ func TestCreateSystem(t *testing.T) { appSecret := "123" cacheimpls.InitCaches(false) - cacheimpls.LocalAppCodeAppSecretCache.Set(cacheimpls.AppCodeAppSecretCacheKey{ - AppCode: appCode, - AppSecret: appSecret, - }, true) + cacheimpls.LocalAppCodeAppSecretCache.Set(appCode+":"+appSecret, true, 0) // for mock var ctl *gomock.Controller @@ -285,9 +284,11 @@ func TestUpdateSystem(t *testing.T) { }).BadRequestContainsMessage("bad request:") }) + cacheimpls.InitVerifyAppCodeAppSecret(false) + // init the router r := util.SetupRouter() - r.Use(middleware.ClientAuthMiddleware([]byte(""), false)) + r.Use(middleware.ClientAuthMiddleware([]byte(""))) url := "/api/v1/systems/test" r.POST(url, UpdateSystem) @@ -296,10 +297,7 @@ func TestUpdateSystem(t *testing.T) { appSecret := "123" cacheimpls.InitCaches(false) - cacheimpls.LocalAppCodeAppSecretCache.Set(cacheimpls.AppCodeAppSecretCacheKey{ - AppCode: appCode, - AppSecret: appSecret, - }, true) + cacheimpls.LocalAppCodeAppSecretCache.Set(appCode+":"+appSecret, true, 0) // for mock var ctl *gomock.Controller diff --git a/pkg/api/model/handler/token_test.go b/pkg/api/model/handler/token_test.go index 81825785..a1b2d8ed 100644 --- a/pkg/api/model/handler/token_test.go +++ b/pkg/api/model/handler/token_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("Token", func() { diff --git a/pkg/api/model/handler/types_test.go b/pkg/api/model/handler/types_test.go index c965ee66..82197248 100644 --- a/pkg/api/model/handler/types_test.go +++ b/pkg/api/model/handler/types_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/api/model/handler" diff --git a/pkg/api/model/handler/util.go b/pkg/api/model/handler/util.go index b005fb3e..64b738bd 100644 --- a/pkg/api/model/handler/util.go +++ b/pkg/api/model/handler/util.go @@ -50,3 +50,13 @@ func convertToRelatedResourceTypes(rrts []relatedResourceType) []svctypes.Action } return arts } + +func convertToRelatedEnvironments(res []relatedEnvironment) []svctypes.ActionEnvironment { + aes := make([]svctypes.ActionEnvironment, 0, len(res)) + for _, re := range res { + aes = append(aes, svctypes.ActionEnvironment{ + Type: re.Type, + }) + } + return aes +} diff --git a/pkg/api/model/handler/util_test.go b/pkg/api/model/handler/util_test.go index 768b46b1..d2c912d0 100644 --- a/pkg/api/model/handler/util_test.go +++ b/pkg/api/model/handler/util_test.go @@ -11,7 +11,7 @@ package handler_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("Util", func() { diff --git a/pkg/api/model/handler/validation_test.go b/pkg/api/model/handler/validation_test.go index e9e9fd3c..5f2fd3a9 100644 --- a/pkg/api/model/handler/validation_test.go +++ b/pkg/api/model/handler/validation_test.go @@ -17,7 +17,7 @@ import ( "net/http/httptest" "github.com/gin-gonic/gin" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/api/policy/handler/auth.go b/pkg/api/policy/handler/auth.go index 52b565db..312c0349 100644 --- a/pkg/api/policy/handler/auth.go +++ b/pkg/api/policy/handler/auth.go @@ -12,13 +12,14 @@ package handler import ( "errors" + "time" "github.com/TencentBlueKing/gopkg/errorx" "github.com/gin-gonic/gin" "iam/pkg/abac/pdp" + "iam/pkg/abac/pdp/evalctx" "iam/pkg/abac/pdp/evaluation" - pdptypes "iam/pkg/abac/pdp/types" "iam/pkg/abac/types" "iam/pkg/abac/types/request" "iam/pkg/cacheimpls" @@ -278,6 +279,12 @@ func BatchAuthByResources(c *gin.Context) { return } + // TODO: move to pdp/entrance.go + if entry != nil { + envs, _ := evalctx.GenTimeEnvsFromCache(pdp.DefaultTz, time.Now()) + debug.WithValue(entry, "env", envs) + } + // do eval for each resource for _, resources := range body.ResourcesList { // TODO: 这里下沉到下一层, 不应该直接依赖evaluation, 只应该依赖pdp @@ -294,7 +301,7 @@ func BatchAuthByResources(c *gin.Context) { } // do eval - isAllowed, _, err := evaluation.EvalPolicies(pdptypes.NewEvalContext(r), policies) + isAllowed, _, err := evaluation.EvalPolicies(evalctx.NewEvalContext(r), policies) if err != nil { err = errorWrapf(err, " pdp.EvalPolicies req=`%+v`, policies=`%+v` fail", r, policies) util.SystemErrorJSONResponseWithDebug(c, err, entry) diff --git a/pkg/api/policy/handler/util_suite_test.go b/pkg/api/policy/handler/util_suite_test.go index 410bb9f9..caf2c6d1 100644 --- a/pkg/api/policy/handler/util_suite_test.go +++ b/pkg/api/policy/handler/util_suite_test.go @@ -13,7 +13,7 @@ package handler_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/api/policy/handler/util_test.go b/pkg/api/policy/handler/util_test.go index 34ecc818..0c92d98b 100644 --- a/pkg/api/policy/handler/util_test.go +++ b/pkg/api/policy/handler/util_test.go @@ -17,8 +17,8 @@ import ( "github.com/TencentBlueKing/gopkg/cache" "github.com/TencentBlueKing/gopkg/cache/memory" - "github.com/agiledragon/gomonkey" - . "github.com/onsi/ginkgo" + "github.com/agiledragon/gomonkey/v2" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/abac/types" diff --git a/pkg/api/web/handler/action_slz.go b/pkg/api/web/handler/action_slz.go index b75d6763..ca4a4a9c 100644 --- a/pkg/api/web/handler/action_slz.go +++ b/pkg/api/web/handler/action_slz.go @@ -12,7 +12,7 @@ package handler const ( actionSupportFields = "id,name,name_en,related_resource_types,version,type," + - "description,description_en,related_actions" + "description,description_en,related_actions,related_environments" actionDefaultFields = "id,name,name_en" ) diff --git a/pkg/api/web/handler/handler_suite_test.go b/pkg/api/web/handler/handler_suite_test.go index 90cc0849..ee4b4695 100644 --- a/pkg/api/web/handler/handler_suite_test.go +++ b/pkg/api/web/handler/handler_suite_test.go @@ -13,7 +13,7 @@ package handler_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/api/web/handler/model_change_event.go b/pkg/api/web/handler/model_change_event.go index 7d512e94..89ac27e5 100644 --- a/pkg/api/web/handler/model_change_event.go +++ b/pkg/api/web/handler/model_change_event.go @@ -13,6 +13,7 @@ package handler import ( "github.com/gin-gonic/gin" + "github.com/TencentBlueKing/gopkg/conv" "github.com/TencentBlueKing/gopkg/errorx" "iam/pkg/service" @@ -22,10 +23,13 @@ import ( // ListModelChangeEvent 查询变更事件列表 func ListModelChangeEvent(c *gin.Context) { status := c.Query("status") + limit, err := conv.ToInt64(c.Query("limit")) + if err != nil { + limit = 1000 + } svc := service.NewModelChangeService() - events, err := svc.ListByStatus(status) - + events, err := svc.ListByStatus(status, limit) if err != nil { err = errorx.Wrapf(err, "Handler", "ListModelChangeEvent", "status=`%s`", status) util.SystemErrorJSONResponse(c, err) @@ -43,7 +47,7 @@ func UpdateModelChangeEvent(c *gin.Context) { return } - eventPK, err := util.StringToInt64(c.Param("event_pk")) + eventPK, err := conv.ToInt64(c.Param("event_pk")) if err != nil { util.BadRequestErrorJSONResponse(c, err.Error()) return diff --git a/pkg/api/web/handler/perm_template_test.go b/pkg/api/web/handler/perm_template_test.go index b10f3ca2..b5c4c516 100644 --- a/pkg/api/web/handler/perm_template_test.go +++ b/pkg/api/web/handler/perm_template_test.go @@ -14,7 +14,7 @@ import ( "errors" "testing" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" "iam/pkg/abac/prp" diff --git a/pkg/api/web/handler/policy.go b/pkg/api/web/handler/policy.go index 61079f94..4ecc1e70 100644 --- a/pkg/api/web/handler/policy.go +++ b/pkg/api/web/handler/policy.go @@ -313,3 +313,17 @@ func DeleteActionPolicies(c *gin.Context) { util.SuccessJSONResponse(c, "ok", gin.H{}) } + +// DeleteUnreferencedExpressions clean not quoted expression +func DeleteUnreferencedExpressions(c *gin.Context) { + manager := service.NewPolicyService() + + err := manager.DeleteUnreferencedExpressions() + if err != nil { + err = errorx.Wrapf(err, "Handler", "DeleteUnreferencedExpressions", "") + util.SystemErrorJSONResponse(c, err) + return + } + + util.SuccessJSONResponse(c, "ok", gin.H{}) +} diff --git a/pkg/api/web/handler/policy_test.go b/pkg/api/web/handler/policy_test.go index 666fc135..8ddb3122 100644 --- a/pkg/api/web/handler/policy_test.go +++ b/pkg/api/web/handler/policy_test.go @@ -18,7 +18,7 @@ import ( "iam/pkg/abac/prp/mock" "iam/pkg/util" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" ) diff --git 
a/pkg/api/web/handler/subject_test.go b/pkg/api/web/handler/subject_test.go index d240db68..b0a5fb8c 100644 --- a/pkg/api/web/handler/subject_test.go +++ b/pkg/api/web/handler/subject_test.go @@ -14,7 +14,7 @@ import ( "errors" "testing" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" pl "iam/pkg/abac/prp/policy" diff --git a/pkg/api/web/router.go b/pkg/api/web/router.go index a3f28445..2983e4f5 100644 --- a/pkg/api/web/router.go +++ b/pkg/api/web/router.go @@ -119,4 +119,7 @@ func Register(r *gin.RouterGroup) { // 模型变更事件 r.GET("/model-change-event", handler.ListModelChangeEvent) r.PUT("/model-change-event/:event_pk", handler.UpdateModelChangeEvent) + + // 清理未引用的expression + r.DELETE("/unreferenced-expressions", handler.DeleteUnreferencedExpressions) } diff --git a/pkg/cacheimpls/action_detail_test.go b/pkg/cacheimpls/action_detail_test.go index 7e14b5c5..c40ed274 100644 --- a/pkg/cacheimpls/action_detail_test.go +++ b/pkg/cacheimpls/action_detail_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" diff --git a/pkg/cacheimpls/action_pk_test.go b/pkg/cacheimpls/action_pk_test.go index 29c421c5..642fccda 100644 --- a/pkg/cacheimpls/action_pk_test.go +++ b/pkg/cacheimpls/action_pk_test.go @@ -18,7 +18,7 @@ import ( "iam/pkg/service" "iam/pkg/service/mock" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) diff --git a/pkg/cacheimpls/cacheimpls_suite_test.go b/pkg/cacheimpls/cacheimpls_suite_test.go new file mode 100644 index 00000000..6373dd4d --- /dev/null +++ b/pkg/cacheimpls/cacheimpls_suite_test.go @@ -0,0 +1,13 @@ +package cacheimpls_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestCacheimpls(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Cacheimpls Suite") +} diff --git a/pkg/cacheimpls/impls_suite_test.go b/pkg/cacheimpls/impls_suite_test.go deleted file mode 100644 index 0010ca6b..00000000 --- a/pkg/cacheimpls/impls_suite_test.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available. - * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. - * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at http://opensource.org/licenses/MIT - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package cacheimpls_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestImpls(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Impls Suite") -} diff --git a/pkg/cacheimpls/init.go b/pkg/cacheimpls/init.go index 34e951bb..c324a9b0 100644 --- a/pkg/cacheimpls/init.go +++ b/pkg/cacheimpls/init.go @@ -29,7 +29,7 @@ const CacheLayer = "Cache" // LocalAppCodeAppSecretCache ... 
var ( - LocalAppCodeAppSecretCache memory.Cache + LocalAppCodeAppSecretCache *gocache.Cache LocalAuthAppAccessKeyCache *gocache.Cache LocalSubjectCache memory.Cache LocalSubjectRoleCache memory.Cache @@ -74,13 +74,7 @@ func newRandomDuration(seconds int) backend.RandomExtraExpirationDurationFunc { // Cache should only know about get/retrieve data // ! DO NOT CARE ABOUT WHAT THE DATA WILL BE USED FOR func InitCaches(disabled bool) { - LocalAppCodeAppSecretCache = memory.NewCache( - "app_code_app_secret", - disabled, - retrieveAppCodeAppSecret, - 12*time.Hour, - nil, - ) + LocalAppCodeAppSecretCache = gocache.New(12*time.Hour, 5*time.Minute) // auth app_code/app_secret cache LocalAuthAppAccessKeyCache = gocache.New(12*time.Hour, 5*time.Minute) @@ -266,3 +260,14 @@ func InitPolicyCacheSettings(disabled bool, expirationDays int64) { log.Warn("the LocalPolicyCache is disabled! Will query policy from database!") } } + +// InitVerifyAppCodeAppSecret ... +func InitVerifyAppCodeAppSecret(enableBkAuth bool) { + if enableBkAuth { + VerifyAppCodeAppSecret = VerifyAppCodeAppSecretFromAuth + log.Infof("init VerifyAppCodeAppSecret to VerifyAppCodeAppSecretFromAuth") + } else { + VerifyAppCodeAppSecret = VerifyAppCodeAppSecretFromDB + log.Infof("init VerifyAppCodeAppSecret to VerifyAppCodeAppSecretFromDB") + } +} diff --git a/pkg/cacheimpls/init_test.go b/pkg/cacheimpls/init_test.go index 47a853cc..83b11b77 100644 --- a/pkg/cacheimpls/init_test.go +++ b/pkg/cacheimpls/init_test.go @@ -18,8 +18,8 @@ import ( func TestInitCaches(t *testing.T) { InitCaches(true) - assert.True(t, LocalAppCodeAppSecretCache.Disabled()) + assert.True(t, LocalSubjectCache.Disabled()) InitCaches(false) - assert.False(t, LocalAppCodeAppSecretCache.Disabled()) + assert.False(t, LocalSubjectCache.Disabled()) } diff --git a/pkg/cacheimpls/local_apigw_jwt_client_id_test.go b/pkg/cacheimpls/local_apigw_jwt_client_id_test.go index 85c4206b..d0c777ab 100644 --- a/pkg/cacheimpls/local_apigw_jwt_client_id_test.go +++ b/pkg/cacheimpls/local_apigw_jwt_client_id_test.go @@ -15,7 +15,7 @@ import ( "github.com/TencentBlueKing/gopkg/cache" "github.com/TencentBlueKing/gopkg/cache/memory" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/cacheimpls/local_app_code_secret.go b/pkg/cacheimpls/local_app_code_secret.go index 9ba08634..89dff26a 100644 --- a/pkg/cacheimpls/local_app_code_secret.go +++ b/pkg/cacheimpls/local_app_code_secret.go @@ -13,7 +13,6 @@ package cacheimpls import ( "time" - "github.com/TencentBlueKing/gopkg/cache" "github.com/TencentBlueKing/gopkg/stringx" gocache "github.com/patrickmn/go-cache" log "github.com/sirupsen/logrus" @@ -22,39 +21,38 @@ import ( "iam/pkg/database/edao" ) -// AppCodeAppSecretCacheKey ... -type AppCodeAppSecretCacheKey struct { - AppCode string - AppSecret string -} +// VerifyAppCodeAppSecret ... +var VerifyAppCodeAppSecret func(appCode, appSecret string) bool -// Key ... -func (k AppCodeAppSecretCacheKey) Key() string { - return k.AppCode + ":" + k.AppSecret -} +// VerifyAppCodeAppSecretFromDB ... +func VerifyAppCodeAppSecretFromDB(appCode, appSecret string) bool { + // 1. get from cache + key := appCode + ":" + appSecret -func retrieveAppCodeAppSecret(key cache.Key) (interface{}, error) { - k := key.(AppCodeAppSecretCacheKey) + value, found := LocalAppCodeAppSecretCache.Get(key) + if found { + return value.(bool) + } + // 2. 
get from database manager := edao.NewAppSecretManager() - return manager.Exists(k.AppCode, k.AppSecret) -} - -// VerifyAppCodeAppSecret ... -func VerifyAppCodeAppSecret(appCode, appSecret string) bool { - key := AppCodeAppSecretCacheKey{ - AppCode: appCode, - AppSecret: appSecret, - } - exists, err := LocalAppCodeAppSecretCache.GetBool(key) + valid, err := manager.Exists(appCode, appSecret) if err != nil { - log.Errorf("get app_code_app_secret from memory cache fail, app_code=%s, app_secret=%s, err=%s", + log.Errorf("verify app_code_app_secret from bk_paas+esb_app_account fail, app_code=%s, app_secret=%s, err=%s", appCode, stringx.Truncate(appSecret, 6)+"******", err) return false } - return exists + + // 3. set to cache, default 12 hours; if not valid, only keep in cache for 1 minute + // in case the auth server is down, we can still get previously matched valid credentials from the cache + ttl := gocache.DefaultExpiration + if !valid { + ttl = 1 * time.Minute + } + LocalAppCodeAppSecretCache.Set(key, valid, ttl) + return valid } func VerifyAppCodeAppSecretFromAuth(appCode, appSecret string) bool { diff --git a/pkg/cacheimpls/local_app_code_secret_test.go b/pkg/cacheimpls/local_app_code_secret_test.go index 5434ec36..14ea0e44 100644 --- a/pkg/cacheimpls/local_app_code_secret_test.go +++ b/pkg/cacheimpls/local_app_code_secret_test.go @@ -1,53 +1,127 @@ -/* - * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available. - * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. - * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at http://opensource.org/licenses/MIT - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package cacheimpls +package cacheimpls_test import ( "errors" - "testing" "time" - "github.com/TencentBlueKing/gopkg/cache" - "github.com/TencentBlueKing/gopkg/cache/memory" + "github.com/agiledragon/gomonkey/v2" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/ginkgo/v2" + gocache "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" + + "iam/pkg/cacheimpls" + "iam/pkg/component" + mock2 "iam/pkg/component/mock" + "iam/pkg/database/edao" + "iam/pkg/database/edao/mock" ) -func TestAppCodeAppSecretCacheKey_Key(t *testing.T) { - k := AppCodeAppSecretCacheKey{ - AppCode: "hello", - AppSecret: "123", - } - assert.Equal(t, "hello:123", k.Key()) -} - -func TestVerifyAppCodeAppSecret(t *testing.T) { - var ( - expiration = 5 * time.Minute - ) - - // valid - retrieveFunc := func(key cache.Key) (interface{}, error) { - return true, nil - } - mockCache := memory.NewCache( - "mockCache", false, retrieveFunc, expiration, nil) - LocalAppCodeAppSecretCache = mockCache - assert.True(t, VerifyAppCodeAppSecret("test", "123")) - - // error - retrieveFunc = func(key cache.Key) (interface{}, error) { - return false, errors.New("error here") - } - mockCache = memory.NewCache( - "mockCache", false, retrieveFunc, expiration, nil) - LocalAppCodeAppSecretCache = mockCache - assert.False(t, VerifyAppCodeAppSecret("test", "123")) -} +var _ = Describe("LocalAppCodeSecret", func() { + + Describe("VerifyAppCodeAppSecret", func() { + var ctl *gomock.Controller + var mockManager *mock.MockAppSecretManager + var patches *gomonkey.Patches + BeforeEach(func() { + cacheimpls.LocalAppCodeAppSecretCache = gocache.New(12*time.Hour, 5*time.Minute) + + ctl = gomock.NewController(GinkgoT()) + mockManager = mock.NewMockAppSecretManager(ctl) + + }) + + AfterEach(func() { + ctl.Finish() + if patches != nil { + patches.Reset() + } + }) + + It("hit", func() { + cacheimpls.LocalAppCodeAppSecretCache.Set("app:123", true, 12*time.Hour) + ok := cacheimpls.VerifyAppCodeAppSecretFromDB("app", "123") + assert.True(GinkgoT(), ok) + }) + + It("miss, get from database error", func() { + mockManager.EXPECT().Exists(gomock.Any(), gomock.Any()).Return(false, errors.New("errror happend")).AnyTimes() + patches = gomonkey.ApplyFunc(edao.NewAppSecretManager, + func() edao.AppSecretManager { + return mockManager + }) + + ok := cacheimpls.VerifyAppCodeAppSecretFromDB("app", "123") + assert.False(GinkgoT(), ok) + }) + + It("miss, get from database valid", func() { + mockManager.EXPECT().Exists(gomock.Any(), gomock.Any()).Return(false, nil).AnyTimes() + patches = gomonkey.ApplyFunc(edao.NewAppSecretManager, + func() edao.AppSecretManager { + return mockManager + }) + + ok := cacheimpls.VerifyAppCodeAppSecretFromDB("app", "123") + assert.False(GinkgoT(), ok) + + }) + + It("miss, get from database invalid", func() { + mockManager.EXPECT().Exists(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes() + patches = gomonkey.ApplyFunc(edao.NewAppSecretManager, + func() edao.AppSecretManager { + return mockManager + }) + + ok := cacheimpls.VerifyAppCodeAppSecretFromDB("app", "123") + assert.True(GinkgoT(), ok) + }) + }) + + Describe("VerifyAppCodeAppSecretFromAuth", func() { + var ctl *gomock.Controller + var mockCli *mock2.MockAuthClient + BeforeEach(func() { + cacheimpls.LocalAuthAppAccessKeyCache = gocache.New(12*time.Hour, 5*time.Minute) + + ctl = gomock.NewController(GinkgoT()) + mockCli = mock2.NewMockAuthClient(ctl) + }) + + AfterEach(func() { + ctl.Finish() + }) + + It("hit", func() { + cacheimpls.LocalAuthAppAccessKeyCache.Set("app:123", true, 12*time.Hour) + ok := cacheimpls.VerifyAppCodeAppSecretFromAuth("app", "123") + assert.True(GinkgoT(), ok) + }) + + It("miss, get from bkauth error", func() { + mockCli.EXPECT().Verify(gomock.Any(), gomock.Any()).Return(false, 
errors.New("errror happend")).AnyTimes() + component.BkAuth = mockCli + + ok := cacheimpls.VerifyAppCodeAppSecretFromAuth("app", "123") + assert.False(GinkgoT(), ok) + }) + + It("miss, get from bkauth valid", func() { + mockCli.EXPECT().Verify(gomock.Any(), gomock.Any()).Return(false, nil).AnyTimes() + component.BkAuth = mockCli + + ok := cacheimpls.VerifyAppCodeAppSecretFromAuth("app", "123") + assert.False(GinkgoT(), ok) + + }) + + It("miss, get from bkauth invalid", func() { + mockCli.EXPECT().Verify(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes() + component.BkAuth = mockCli + + ok := cacheimpls.VerifyAppCodeAppSecretFromAuth("app", "123") + assert.True(GinkgoT(), ok) + }) + }) +}) diff --git a/pkg/cacheimpls/resource_type_test.go b/pkg/cacheimpls/resource_type_test.go index 3e5bc023..a03d5e46 100644 --- a/pkg/cacheimpls/resource_type_test.go +++ b/pkg/cacheimpls/resource_type_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" diff --git a/pkg/cacheimpls/subject_group_test.go b/pkg/cacheimpls/subject_group_test.go index b679baa0..de154db8 100644 --- a/pkg/cacheimpls/subject_group_test.go +++ b/pkg/cacheimpls/subject_group_test.go @@ -17,9 +17,9 @@ import ( "github.com/TencentBlueKing/gopkg/cache" "github.com/TencentBlueKing/gopkg/conv" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/cache/redis" diff --git a/pkg/cacheimpls/subject_pk_test.go b/pkg/cacheimpls/subject_pk_test.go index 2b5ecc8d..48d9119b 100644 --- a/pkg/cacheimpls/subject_pk_test.go +++ b/pkg/cacheimpls/subject_pk_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" diff --git a/pkg/cacheimpls/system_test.go b/pkg/cacheimpls/system_test.go index 57a60495..2f4018f8 100644 --- a/pkg/cacheimpls/system_test.go +++ b/pkg/cacheimpls/system_test.go @@ -19,7 +19,7 @@ import ( "iam/pkg/service/mock" svctypes "iam/pkg/service/types" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) diff --git a/pkg/component/auth.go b/pkg/component/auth.go index 30170893..98d718d8 100644 --- a/pkg/component/auth.go +++ b/pkg/component/auth.go @@ -24,6 +24,8 @@ import ( "iam/pkg/logging" ) +//go:generate mockgen -source=$GOFILE -destination=./mock/$GOFILE -package=mock + // AuthResponse is the struct of iam backend response type AuthResponse struct { Code int `json:"code"` diff --git a/pkg/component/component_suite_test.go b/pkg/component/component_suite_test.go index 55723731..ce87b165 100644 --- a/pkg/component/component_suite_test.go +++ b/pkg/component/component_suite_test.go @@ -13,7 +13,7 @@ package component_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/component/init_test.go b/pkg/component/init_test.go index 800704c1..ca9c3561 100644 --- a/pkg/component/init_test.go +++ b/pkg/component/init_test.go @@ -13,7 +13,7 @@ package component import ( "time" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/parnurzeal/gorequest" "iam/pkg/config" diff --git a/pkg/component/mock/auth.go b/pkg/component/mock/auth.go new file mode 100644 index 00000000..14c43d12 --- /dev/null +++ b/pkg/component/mock/auth.go @@ -0,0 +1,48 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: auth.go + +// Package mock is a generated GoMock package. +package mock + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockAuthClient is a mock of AuthClient interface +type MockAuthClient struct { + ctrl *gomock.Controller + recorder *MockAuthClientMockRecorder +} + +// MockAuthClientMockRecorder is the mock recorder for MockAuthClient +type MockAuthClientMockRecorder struct { + mock *MockAuthClient +} + +// NewMockAuthClient creates a new mock instance +func NewMockAuthClient(ctrl *gomock.Controller) *MockAuthClient { + mock := &MockAuthClient{ctrl: ctrl} + mock.recorder = &MockAuthClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAuthClient) EXPECT() *MockAuthClientMockRecorder { + return m.recorder +} + +// Verify mocks base method +func (m *MockAuthClient) Verify(appCode, appSecret string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Verify", appCode, appSecret) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Verify indicates an expected call of Verify +func (mr *MockAuthClientMockRecorder) Verify(appCode, appSecret interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockAuthClient)(nil).Verify), appCode, appSecret) +} diff --git a/pkg/database/dao/expression.go b/pkg/database/dao/expression.go index ebf4b737..83370361 100644 --- a/pkg/database/dao/expression.go +++ b/pkg/database/dao/expression.go @@ -49,6 +49,12 @@ type ExpressionManager interface { BulkCreateWithTx(tx *sqlx.Tx, expressions []Expression) ([]int64, error) // 返回批量创建的last id BulkUpdateWithTx(tx *sqlx.Tx, expressions []Expression) error BulkDeleteByPKsWithTx(tx *sqlx.Tx, pks []int64) (int64, error) + + // for task + + ChangeUnreferencedExpressionType(fromType int64, toType int64) error + ChangeReferencedExpressionTypeBeforeUpdateAt(fromType int64, toType int64, updatedAt int64) error + DeleteUnreferencedExpressionByTypeBeforeUpdateAt(_type int64, updatedAt int64) error } type expressionManager struct { @@ -112,6 +118,25 @@ func (m *expressionManager) BulkDeleteByPKsWithTx(tx *sqlx.Tx, pks []int64) (int return m.bulkDeleteByPKsWithTx(tx, pks) } +// ChangeUnreferencedExpressionType 更新未引用的expression的type字段 +func (m *expressionManager) ChangeUnreferencedExpressionType(fromType int64, toType int64) error { + return m.updateUnreferencedExpressionType(fromType, toType) +} + +// ChangeReferencedExpressionTypeBeforeUpdateAt 更新有引用的expression的type字段 +func (m *expressionManager) ChangeReferencedExpressionTypeBeforeUpdateAt( + fromType int64, toType int64, updatedAt int64, +) error { + return m.updateReferencedExpressionTypeBeforeUpdateAt(fromType, toType, updatedAt) +} + +// DeleteUnreferencedExpressionByTypeBeforeUpdateAt 删除未被引用的expression +func (m *expressionManager) DeleteUnreferencedExpressionByTypeBeforeUpdateAt( + _type int64, updatedAt int64, +) error { + return m.deleteUnreferencedExpressionByTypeBeforeUpdateAt(_type, updatedAt) +} + func (m *expressionManager) selectAuthByPKs(expressions *[]AuthExpression, pks []int64) error { query := `SELECT pk, @@ -164,3 
+189,32 @@ func (m *expressionManager) bulkDeleteByPKsWithTx(tx *sqlx.Tx, pks []int64) (int sql := `DELETE FROM expression WHERE pk IN (?)` return database.SqlxDeleteReturnRowsWithTx(tx, sql, pks) } + +func (m *expressionManager) updateUnreferencedExpressionType(fromType int64, toType int64) error { + sql := `UPDATE expression SET + type=? + WHERE type=? + AND pk NOT IN (SELECT expression_pk FROM policy)` + return database.SqlxExec(m.DB, sql, toType, fromType) +} + +func (m *expressionManager) updateReferencedExpressionTypeBeforeUpdateAt( + fromType int64, toType int64, updatedAt int64, +) error { + sql := `UPDATE expression SET + type=? + WHERE type=? + AND updated_at < FROM_UNIXTIME(?) + AND pk IN (SELECT expression_pk FROM policy)` + return database.SqlxExec(m.DB, sql, toType, fromType, updatedAt) +} + +func (m *expressionManager) deleteUnreferencedExpressionByTypeBeforeUpdateAt( + _type int64, updatedAt int64, +) error { + sql := `DELETE FROM expression + WHERE type=? + AND updated_at < FROM_UNIXTIME(?) + AND pk NOT IN (SELECT expression_pk FROM policy)` + return database.SqlxExec(m.DB, sql, _type, updatedAt) +} diff --git a/pkg/database/dao/expression_test.go b/pkg/database/dao/expression_test.go index eb5ea47e..1a9951b9 100644 --- a/pkg/database/dao/expression_test.go +++ b/pkg/database/dao/expression_test.go @@ -154,3 +154,63 @@ func Test_expressionManager_ListDistinctBySignaturesType(t *testing.T) { assert.Equal(t, mockData[1].(Expression), expressions[1]) }) } + +func Test_expressionManager_ChangeUnreferencedExpressionType(t *testing.T) { + database.RunWithMock(t, func(db *sqlx.DB, mock sqlmock.Sqlmock, t *testing.T) { + mock.ExpectBegin() + mock.ExpectExec(`UPDATE expression SET type=`).WithArgs( + int64(-1), int64(1), + ).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + tx, err := db.Beginx() + assert.NoError(t, err) + + manager := &expressionManager{DB: db} + err = manager.ChangeUnreferencedExpressionType(1, -1) + + tx.Commit() + + assert.NoError(t, err) + }) +} + +func Test_expressionManager_ChangeReferencedExpressionTypeBeforeUpdateAt(t *testing.T) { + database.RunWithMock(t, func(db *sqlx.DB, mock sqlmock.Sqlmock, t *testing.T) { + mock.ExpectBegin() + mock.ExpectExec(`UPDATE expression SET type=`).WithArgs( + int64(1), int64(-1), int64(0), + ).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + tx, err := db.Beginx() + assert.NoError(t, err) + + manager := &expressionManager{DB: db} + err = manager.ChangeReferencedExpressionTypeBeforeUpdateAt(-1, 1, 0) + + tx.Commit() + + assert.NoError(t, err) + }) +} + +func Test_expressionManager_DeleteUnreferencedExpressionByTypeBeforeUpdateAt(t *testing.T) { + database.RunWithMock(t, func(db *sqlx.DB, mock sqlmock.Sqlmock, t *testing.T) { + mock.ExpectBegin() + mock.ExpectExec(`DELETE FROM expression WHERE type=`).WithArgs( + int64(-1), int64(0), + ).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + tx, err := db.Beginx() + assert.NoError(t, err) + + manager := &expressionManager{DB: db} + err = manager.DeleteUnreferencedExpressionByTypeBeforeUpdateAt(-1, 0) + + tx.Commit() + + assert.NoError(t, err) + }) +} diff --git a/pkg/database/dao/mock/expression.go b/pkg/database/dao/mock/expression.go index 1aee6df8..053fadfd 100644 --- a/pkg/database/dao/mock/expression.go +++ b/pkg/database/dao/mock/expression.go @@ -107,3 +107,45 @@ func (mr *MockExpressionManagerMockRecorder) BulkDeleteByPKsWithTx(tx, pks inter mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"BulkDeleteByPKsWithTx", reflect.TypeOf((*MockExpressionManager)(nil).BulkDeleteByPKsWithTx), tx, pks) } + +// ChangeUnreferencedExpressionType mocks base method +func (m *MockExpressionManager) ChangeUnreferencedExpressionType(fromType, toType int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChangeUnreferencedExpressionType", fromType, toType) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChangeUnreferencedExpressionType indicates an expected call of ChangeUnreferencedExpressionType +func (mr *MockExpressionManagerMockRecorder) ChangeUnreferencedExpressionType(fromType, toType interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeUnreferencedExpressionType", reflect.TypeOf((*MockExpressionManager)(nil).ChangeUnreferencedExpressionType), fromType, toType) +} + +// ChangeReferencedExpressionTypeBeforeUpdateAt mocks base method +func (m *MockExpressionManager) ChangeReferencedExpressionTypeBeforeUpdateAt(fromType, toType, updatedAt int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChangeReferencedExpressionTypeBeforeUpdateAt", fromType, toType, updatedAt) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChangeReferencedExpressionTypeBeforeUpdateAt indicates an expected call of ChangeReferencedExpressionTypeBeforeUpdateAt +func (mr *MockExpressionManagerMockRecorder) ChangeReferencedExpressionTypeBeforeUpdateAt(fromType, toType, updatedAt interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeReferencedExpressionTypeBeforeUpdateAt", reflect.TypeOf((*MockExpressionManager)(nil).ChangeReferencedExpressionTypeBeforeUpdateAt), fromType, toType, updatedAt) +} + +// DeleteUnreferencedExpressionByTypeBeforeUpdateAt mocks base method +func (m *MockExpressionManager) DeleteUnreferencedExpressionByTypeBeforeUpdateAt(_type, updatedAt int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteUnreferencedExpressionByTypeBeforeUpdateAt", _type, updatedAt) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteUnreferencedExpressionByTypeBeforeUpdateAt indicates an expected call of DeleteUnreferencedExpressionByTypeBeforeUpdateAt +func (mr *MockExpressionManagerMockRecorder) DeleteUnreferencedExpressionByTypeBeforeUpdateAt(_type, updatedAt interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnreferencedExpressionByTypeBeforeUpdateAt", reflect.TypeOf((*MockExpressionManager)(nil).DeleteUnreferencedExpressionByTypeBeforeUpdateAt), _type, updatedAt) +} diff --git a/pkg/database/dao/mock/model_change_event.go b/pkg/database/dao/mock/model_change_event.go index 74c8e9b1..8e24574f 100644 --- a/pkg/database/dao/mock/model_change_event.go +++ b/pkg/database/dao/mock/model_change_event.go @@ -49,18 +49,18 @@ func (mr *MockModelChangeEventManagerMockRecorder) GetByTypeModel(eventType, sta } // ListByStatus mocks base method -func (m *MockModelChangeEventManager) ListByStatus(status string) ([]dao.ModelChangeEvent, error) { +func (m *MockModelChangeEventManager) ListByStatus(status string, limit int64) ([]dao.ModelChangeEvent, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListByStatus", status) + ret := m.ctrl.Call(m, "ListByStatus", status, limit) ret0, _ := ret[0].([]dao.ModelChangeEvent) ret1, _ := ret[1].(error) return ret0, ret1 } // ListByStatus indicates an expected call of ListByStatus -func (mr *MockModelChangeEventManagerMockRecorder) ListByStatus(status interface{}) 
*gomock.Call { +func (mr *MockModelChangeEventManagerMockRecorder) ListByStatus(status, limit interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByStatus", reflect.TypeOf((*MockModelChangeEventManager)(nil).ListByStatus), status) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByStatus", reflect.TypeOf((*MockModelChangeEventManager)(nil).ListByStatus), status, limit) } // UpdateStatusByPK mocks base method @@ -90,3 +90,17 @@ func (mr *MockModelChangeEventManagerMockRecorder) BulkCreate(modelChangeEvents mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BulkCreate", reflect.TypeOf((*MockModelChangeEventManager)(nil).BulkCreate), modelChangeEvents) } + +// UpdateStatusByModel mocks base method +func (m *MockModelChangeEventManager) UpdateStatusByModel(eventType, modelType string, modelPK int64, status string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateStatusByModel", eventType, modelType, modelPK, status) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateStatusByModel indicates an expected call of UpdateStatusByModel +func (mr *MockModelChangeEventManagerMockRecorder) UpdateStatusByModel(eventType, modelType, modelPK, status interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStatusByModel", reflect.TypeOf((*MockModelChangeEventManager)(nil).UpdateStatusByModel), eventType, modelType, modelPK, status) +} diff --git a/pkg/database/dao/model_change_event.go b/pkg/database/dao/model_change_event.go index 8f4ac5d1..4fc7ca0d 100644 --- a/pkg/database/dao/model_change_event.go +++ b/pkg/database/dao/model_change_event.go @@ -38,7 +38,7 @@ type ModelChangeEvent struct { // ModelChangeEventManager define the event crud for model change type ModelChangeEventManager interface { GetByTypeModel(eventType, status, modelType string, modelPK int64) (ModelChangeEvent, error) - ListByStatus(status string) ([]ModelChangeEvent, error) + ListByStatus(status string, limit int64) ([]ModelChangeEvent, error) UpdateStatusByPK(pk int64, status string) error BulkCreate(modelChangeEvents []ModelChangeEvent) error UpdateStatusByModel(eventType, modelType string, modelPK int64, status string) error @@ -66,8 +66,10 @@ func (m *modelChangeEventManager) GetByTypeModel(eventType, status, modelType st } // ListByStatus ... 
-func (m *modelChangeEventManager) ListByStatus(status string) (modelChangeEvents []ModelChangeEvent, err error) { - err = m.selectByStatus(&modelChangeEvents, status) +func (m *modelChangeEventManager) ListByStatus( + status string, limit int64, +) (modelChangeEvents []ModelChangeEvent, err error) { + err = m.selectByStatus(&modelChangeEvents, status, limit) if errors.Is(err, sql.ErrNoRows) { return modelChangeEvents, nil } @@ -115,8 +117,12 @@ func (m *modelChangeEventManager) UpdateStatusByModel(eventType, modelType strin return m.update(updatedSQL, data) } -func (m *modelChangeEventManager) selectOne(modelChangeEvent *ModelChangeEvent, eventType, status, modelType string, - modelPK int64) error { +func (m *modelChangeEventManager) selectOne( + modelChangeEvent *ModelChangeEvent, + eventType, + status, modelType string, + modelPK int64, +) error { query := `SELECT pk, type, @@ -134,7 +140,9 @@ func (m *modelChangeEventManager) selectOne(modelChangeEvent *ModelChangeEvent, return database.SqlxGet(m.DB, modelChangeEvent, query, eventType, status, modelType, modelPK) } -func (m *modelChangeEventManager) selectByStatus(modelChangeEvents *[]ModelChangeEvent, status string) error { +func (m *modelChangeEventManager) selectByStatus( + modelChangeEvents *[]ModelChangeEvent, status string, limit int64, +) error { query := `SELECT pk, type, @@ -144,8 +152,9 @@ func (m *modelChangeEventManager) selectByStatus(modelChangeEvents *[]ModelChang model_id, model_pk FROM model_change_event - WHERE status=?` - return database.SqlxSelect(m.DB, modelChangeEvents, query, status) + WHERE status=? + LIMIT ?` + return database.SqlxSelect(m.DB, modelChangeEvents, query, status, limit) } func (m *modelChangeEventManager) update(updatedSQL string, data map[string]interface{}) error { diff --git a/pkg/database/database_suite_test.go b/pkg/database/database_suite_test.go index 385c55a0..589c203f 100644 --- a/pkg/database/database_suite_test.go +++ b/pkg/database/database_suite_test.go @@ -13,7 +13,7 @@ package database_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/database/edao/app_secret.go b/pkg/database/edao/app_secret.go index dec9a928..43d6f8f1 100644 --- a/pkg/database/edao/app_secret.go +++ b/pkg/database/edao/app_secret.go @@ -19,6 +19,8 @@ import ( "iam/pkg/database" ) +//go:generate mockgen -source=$GOFILE -destination=./mock/$GOFILE -package=mock + // BKPaaSApp ... type BKPaaSApp struct { Code string `db:"code"` diff --git a/pkg/database/edao/mock/app_secret.go b/pkg/database/edao/mock/app_secret.go new file mode 100644 index 00000000..f1bf1cf5 --- /dev/null +++ b/pkg/database/edao/mock/app_secret.go @@ -0,0 +1,48 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: app_secret.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockAppSecretManager is a mock of AppSecretManager interface +type MockAppSecretManager struct { + ctrl *gomock.Controller + recorder *MockAppSecretManagerMockRecorder +} + +// MockAppSecretManagerMockRecorder is the mock recorder for MockAppSecretManager +type MockAppSecretManagerMockRecorder struct { + mock *MockAppSecretManager +} + +// NewMockAppSecretManager creates a new mock instance +func NewMockAppSecretManager(ctrl *gomock.Controller) *MockAppSecretManager { + mock := &MockAppSecretManager{ctrl: ctrl} + mock.recorder = &MockAppSecretManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAppSecretManager) EXPECT() *MockAppSecretManagerMockRecorder { + return m.recorder +} + +// Exists mocks base method +func (m *MockAppSecretManager) Exists(appCode, appSecret string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exists", appCode, appSecret) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Exists indicates an expected call of Exists +func (mr *MockAppSecretManagerMockRecorder) Exists(appCode, appSecret interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockAppSecretManager)(nil).Exists), appCode, appSecret) +} diff --git a/pkg/database/sdao/saas_action.go b/pkg/database/sdao/saas_action.go index ca771566..1b970bc9 100644 --- a/pkg/database/sdao/saas_action.go +++ b/pkg/database/sdao/saas_action.go @@ -26,16 +26,17 @@ import ( type SaaSAction struct { database.AllowBlankFields - PK int64 `db:"pk"` - System string `db:"system_id"` - ID string `db:"id"` - Name string `db:"name"` - NameEn string `db:"name_en"` - Description string `db:"description"` - DescriptionEn string `db:"description_en"` - RelatedActions string `db:"related_actions"` - Type string `db:"type"` - Version int64 `db:"version"` + PK int64 `db:"pk"` + System string `db:"system_id"` + ID string `db:"id"` + Name string `db:"name"` + NameEn string `db:"name_en"` + Description string `db:"description"` + DescriptionEn string `db:"description_en"` + RelatedActions string `db:"related_actions"` + RelatedEnvironments string `db:"related_environments"` + Type string `db:"type"` + Version int64 `db:"version"` } // SaaSActionManager ... 
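
The generated MockAppSecretManager above backs the new EnableBkAuth verification path (cacheimpls.InitVerifyAppCodeAppSecret, and the middleware change later in this diff). A minimal sketch of how the mock might be driven in a test; the app code/secret values are invented for illustration, not taken from this PR:

```go
package edao_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"

	mock "iam/pkg/database/edao/mock"
)

func TestAppSecretExists(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	manager := mock.NewMockAppSecretManager(ctrl)
	// Expect a single lookup for this app code / secret pair and report it as valid.
	manager.EXPECT().Exists("demo_app", "demo_secret").Return(true, nil)

	ok, err := manager.Exists("demo_app", "demo_secret")
	assert.NoError(t, err)
	assert.True(t, ok)
}
```
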
@@ -118,9 +119,11 @@ func (m *saasActionManager) bulkInsertWithTx(tx *sqlx.Tx, saasActions []SaaSActi description, description_en, related_actions, + related_environments, type, version - ) VALUES (:system_id, :id, :name, :name_en, :description, :description_en, :related_actions, :type, :version)` + ) VALUES (:system_id, :id, :name, :name_en, :description, :description_en, + :related_actions, :related_environments, :type, :version)` return database.SqlxBulkInsertWithTx(tx, query, saasActions) } @@ -147,6 +150,7 @@ func (m *saasActionManager) selectBySystem(saasAction *[]SaaSAction, system stri description, description_en, related_actions, + related_environments, type, version FROM saas_action diff --git a/pkg/database/sqlx.go b/pkg/database/sqlx.go index 2f6b2c80..57708d15 100644 --- a/pkg/database/sqlx.go +++ b/pkg/database/sqlx.go @@ -72,6 +72,16 @@ func bulkInsertTimer(f bulkInsertFunc) bulkInsertFunc { } } +type execFunc func(db *sqlx.DB, query string, args ...interface{}) error + +func execTimer(f execFunc) execFunc { + return func(db *sqlx.DB, query string, args ...interface{}) error { + start := time.Now() + defer logSlowSQL(start, query, args) + return f(db, query, args...) + } +} + // ================== raw execute func ================== func sqlxSelectFunc(db *sqlx.DB, dest interface{}, query string, args ...interface{}) error { query, args, err := sqlx.In(query, args...) @@ -175,6 +185,11 @@ func sqlxBulkUpdateFunc(db *sqlx.DB, query string, args interface{}) error { return tx.Commit() } +func sqlxExecFunc(db *sqlx.DB, query string, args ...interface{}) error { + _, err := db.Exec(query, args...) + return err +} + // ============== timer with tx ============== type insertWithTxFunc func(tx *sqlx.Tx, query string, args interface{}) error @@ -388,6 +403,7 @@ var ( SqlxUpdate = updateTimer(sqlxUpdateFunc) SqlxBulkInsert = bulkInsertTimer(sqlxBulkInsertFunc) SqlxBulkUpdate = bulkInsertTimer(sqlxBulkUpdateFunc) + SqlxExec = execTimer(sqlxExecFunc) SqlxDeleteWithCtx = deleteWithCtxTimer(sqlxDeleteWithCtxFunc) SqlxInsertWithTx = insertWithTxTimer(sqlxInsertWithTx) diff --git a/pkg/database/utils_test.go b/pkg/database/utils_test.go index 41dae6ff..895fd2f2 100644 --- a/pkg/database/utils_test.go +++ b/pkg/database/utils_test.go @@ -17,7 +17,7 @@ import ( "github.com/TencentBlueKing/gopkg/stringx" jsoniter "github.com/json-iterator/go" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/util" diff --git a/pkg/middleware/client.go b/pkg/middleware/client.go index 462eca61..2a301658 100644 --- a/pkg/middleware/client.go +++ b/pkg/middleware/client.go @@ -33,11 +33,11 @@ func NewClientAuthMiddleware(c *config.Config) gin.HandlerFunc { apiGatewayPublicKey = []byte(apigwCrypto.Key) } - return ClientAuthMiddleware(apiGatewayPublicKey, c.EnableBkAuth) + return ClientAuthMiddleware(apiGatewayPublicKey) } // ClientAuthMiddleware ... -func ClientAuthMiddleware(apiGatewayPublicKey []byte, enableBkAuth bool) gin.HandlerFunc { +func ClientAuthMiddleware(apiGatewayPublicKey []byte) gin.HandlerFunc { return func(c *gin.Context) { log.Debug("Middleware: ClientAuthMiddleware") @@ -80,12 +80,7 @@ func ClientAuthMiddleware(apiGatewayPublicKey []byte, enableBkAuth bool) gin.Han } // 2. 
validate from cache -> database - var valid bool - if enableBkAuth { - valid = cacheimpls.VerifyAppCodeAppSecretFromAuth(appCode, appSecret) - } else { - valid = cacheimpls.VerifyAppCodeAppSecret(appCode, appSecret) - } + valid := cacheimpls.VerifyAppCodeAppSecret(appCode, appSecret) if !valid { util.UnauthorizedJSONResponse(c, "app code or app secret wrong") c.Abort() diff --git a/pkg/middleware/client_jwt_test.go b/pkg/middleware/client_jwt_test.go index 8612527c..d30e3bc0 100644 --- a/pkg/middleware/client_jwt_test.go +++ b/pkg/middleware/client_jwt_test.go @@ -15,9 +15,9 @@ import ( "strings" "testing" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang-jwt/jwt/v4" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/cacheimpls" diff --git a/pkg/middleware/client_test.go b/pkg/middleware/client_test.go index dbebeb7f..ab09fb03 100644 --- a/pkg/middleware/client_test.go +++ b/pkg/middleware/client_test.go @@ -20,6 +20,7 @@ import ( "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" + "iam/pkg/cacheimpls" "iam/pkg/config" "iam/pkg/util" ) @@ -27,9 +28,11 @@ import ( func TestClientAuthMiddleware(t *testing.T) { t.Parallel() + cacheimpls.InitVerifyAppCodeAppSecret(false) + // 1. without appCode appSecret r := gin.Default() - r.Use(ClientAuthMiddleware([]byte(""), false)) + r.Use(ClientAuthMiddleware([]byte(""))) util.NewTestRouter(r) req, _ := http.NewRequest("GET", "/ping", nil) diff --git a/pkg/middleware/metrics.go b/pkg/middleware/metrics.go index b3b6c247..b90c0943 100644 --- a/pkg/middleware/metrics.go +++ b/pkg/middleware/metrics.go @@ -36,12 +36,17 @@ func Metrics() gin.HandlerFunc { clientID := util.GetClientID(c) status := strconv.Itoa(c.Writer.Status()) + e := "0" + if _, hasError := util.GetError(c); hasError { + e = "1" + } + // request count metric.RequestCount.With(prometheus.Labels{ "method": c.Request.Method, "path": c.Request.URL.Path, "status": status, - "error": "0", + "error": e, "client_id": clientID, }).Inc() diff --git a/pkg/middleware/middleware_suite_test.go b/pkg/middleware/middleware_suite_test.go index e8b5f5a1..c5b9d131 100644 --- a/pkg/middleware/middleware_suite_test.go +++ b/pkg/middleware/middleware_suite_test.go @@ -13,7 +13,7 @@ package middleware_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/pkg/service/action.go b/pkg/service/action.go index e3220636..6180bcab 100644 --- a/pkg/service/action.go +++ b/pkg/service/action.go @@ -133,6 +133,14 @@ func (l *actionService) Get(system, actionID string) (types.Action, error) { Type: dbAction.Type, Version: dbAction.Version, } + + if dbAction.RelatedEnvironments != "" { + err = jsoniter.UnmarshalFromString(dbAction.RelatedEnvironments, &action.RelatedEnvironments) + if err != nil { + return action, errorWrapf(err, "unmarshal action.RelatedEnvironments=`%+v` fail", dbAction.RelatedEnvironments) + } + } + relatedResourceTypes := []types.ActionResourceType{} for idx := range dbActionResourceTypes { @@ -203,6 +211,12 @@ func (l *actionService) ListBySystem(system string) ([]types.Action, error) { return nil, errorWrapf(err, "unmarshal action.RelatedActions=`%+v` fail", ac.RelatedActions) } } + if ac.RelatedEnvironments != "" { + err = jsoniter.UnmarshalFromString(ac.RelatedEnvironments, &action.RelatedEnvironments) + if err != nil { + return nil, errorWrapf(err, "unmarshal action.RelatedEnvironments=`%+v` fail", ac.RelatedEnvironments) + } + } relatedResourceTypes := []types.ActionResourceType{} _, ok := actionResourceTypeMap[ac.ID] @@ -295,17 +309,22 @@ func (l *actionService) BulkCreate(system string, actions []types.Action) error if err1 != nil { return errorWrapf(err1, "marshal action.RelatedActions=`%+v` fail", ac.RelatedActions) } + relatedEnvironments, err2 := jsoniter.MarshalToString(ac.RelatedEnvironments) + if err2 != nil { + return errorWrapf(err1, "marshal action.RelatedEnvironments=`%+v` fail", ac.RelatedEnvironments) + } dbSaaSActions = append(dbSaaSActions, sdao.SaaSAction{ - System: system, - ID: ac.ID, - Name: ac.Name, - NameEn: ac.NameEn, - Description: ac.Description, - DescriptionEn: ac.DescriptionEn, - RelatedActions: relatedActions, - Type: ac.Type, - Version: ac.Version, + System: system, + ID: ac.ID, + Name: ac.Name, + NameEn: ac.NameEn, + Description: ac.Description, + DescriptionEn: ac.DescriptionEn, + RelatedActions: relatedActions, + RelatedEnvironments: relatedEnvironments, + Type: ac.Type, + Version: ac.Version, }) singleDBActionResourceTypes, singleDBSaaSActionResourceTypes, err1 := l.convertToDBRelatedResourceTypes(system, ac) @@ -417,16 +436,27 @@ func (l *actionService) Update(system, actionID string, action types.Action) err return errorWrapf(err, "unmarshal action.RelatedActions=`%+v` fail", action.RelatedActions) } } + var relatedEnvironments string + if action.AllowEmptyFields.HasKey("RelatedEnvironments") { + allowBlank.AddKey("RelatedEnvironments") + + var err1 error + relatedEnvironments, err1 = jsoniter.MarshalToString(action.RelatedEnvironments) + if err1 != nil { + return errorWrapf(err, "unmarshal action.RelatedEnvironments=`%+v` fail", action.RelatedEnvironments) + } + } // 4. 
update saas action data := sdao.SaaSAction{ - Name: action.Name, - NameEn: action.NameEn, - Description: action.Description, - DescriptionEn: action.DescriptionEn, - Type: action.Type, - Version: action.Version, - RelatedActions: relatedActions, + Name: action.Name, + NameEn: action.NameEn, + Description: action.Description, + DescriptionEn: action.DescriptionEn, + Type: action.Type, + Version: action.Version, + RelatedActions: relatedActions, + RelatedEnvironments: relatedEnvironments, AllowBlankFields: allowBlank, } diff --git a/pkg/service/action_resource_type_test.go b/pkg/service/action_resource_type_test.go index f0ccc0e8..4cf2deea 100644 --- a/pkg/service/action_resource_type_test.go +++ b/pkg/service/action_resource_type_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "iam/pkg/database/dao" "iam/pkg/database/dao/mock" diff --git a/pkg/service/action_test.go b/pkg/service/action_test.go index 4dc798b8..0738fcba 100644 --- a/pkg/service/action_test.go +++ b/pkg/service/action_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "iam/pkg/database/dao/mock" ) diff --git a/pkg/service/action_thin_test.go b/pkg/service/action_thin_test.go index 549924cf..4346d0ab 100644 --- a/pkg/service/action_thin_test.go +++ b/pkg/service/action_thin_test.go @@ -17,7 +17,7 @@ import ( "iam/pkg/service/types" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/service/engine_policy_test.go b/pkg/service/engine_policy_test.go index a5c32e05..01bfe012 100644 --- a/pkg/service/engine_policy_test.go +++ b/pkg/service/engine_policy_test.go @@ -15,7 +15,7 @@ import ( "time" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/database/dao" diff --git a/pkg/service/mock/model_change_event.go b/pkg/service/mock/model_change_event.go index 075d778d..7e1e6f1c 100644 --- a/pkg/service/mock/model_change_event.go +++ b/pkg/service/mock/model_change_event.go @@ -34,18 +34,18 @@ func (m *MockModelChangeEventService) EXPECT() *MockModelChangeEventServiceMockR } // ListByStatus mocks base method -func (m *MockModelChangeEventService) ListByStatus(status string) ([]types.ModelChangeEvent, error) { +func (m *MockModelChangeEventService) ListByStatus(status string, limit int64) ([]types.ModelChangeEvent, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListByStatus", status) + ret := m.ctrl.Call(m, "ListByStatus", status, limit) ret0, _ := ret[0].([]types.ModelChangeEvent) ret1, _ := ret[1].(error) return ret0, ret1 } // ListByStatus indicates an expected call of ListByStatus -func (mr *MockModelChangeEventServiceMockRecorder) ListByStatus(status interface{}) *gomock.Call { +func (mr *MockModelChangeEventServiceMockRecorder) ListByStatus(status, limit interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByStatus", reflect.TypeOf((*MockModelChangeEventService)(nil).ListByStatus), status) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByStatus", reflect.TypeOf((*MockModelChangeEventService)(nil).ListByStatus), status, limit) } // UpdateStatusByPK mocks base method @@ -62,6 +62,20 @@ func (mr *MockModelChangeEventServiceMockRecorder) UpdateStatusByPK(pk, status i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStatusByPK", reflect.TypeOf((*MockModelChangeEventService)(nil).UpdateStatusByPK), pk, status) } +// UpdateStatusByModel mocks base method +func (m *MockModelChangeEventService) UpdateStatusByModel(eventType, modelType string, modelPK int64, status string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateStatusByModel", eventType, modelType, modelPK, status) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateStatusByModel indicates an expected call of UpdateStatusByModel +func (mr *MockModelChangeEventServiceMockRecorder) UpdateStatusByModel(eventType, modelType, modelPK, status interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStatusByModel", reflect.TypeOf((*MockModelChangeEventService)(nil).UpdateStatusByModel), eventType, modelType, modelPK, status) +} + // BulkCreate mocks base method func (m *MockModelChangeEventService) BulkCreate(modelChangeEvents []types.ModelChangeEvent) error { m.ctrl.T.Helper() diff --git a/pkg/service/mock/policy.go b/pkg/service/mock/policy.go index 983f9141..b985f154 100644 --- a/pkg/service/mock/policy.go +++ b/pkg/service/mock/policy.go @@ -5,12 +5,10 @@ package mock import ( - reflect "reflect" - - "github.com/TencentBlueKing/gopkg/collection/set" + set "github.com/TencentBlueKing/gopkg/collection/set" gomock "github.com/golang/mock/gomock" - types "iam/pkg/service/types" + reflect "reflect" ) // MockPolicyService is a mock of PolicyService interface @@ -284,3 +282,17 @@ func (mr *MockPolicyServiceMockRecorder) HasAnyByActionPK(actionPK interface{}) mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasAnyByActionPK", reflect.TypeOf((*MockPolicyService)(nil).HasAnyByActionPK), actionPK) } + +// DeleteUnreferencedExpressions mocks base method +func (m *MockPolicyService) DeleteUnreferencedExpressions() error { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteUnreferencedExpressions") + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteUnreferencedExpressions indicates an expected call of DeleteUnreferencedExpressions +func (mr *MockPolicyServiceMockRecorder) DeleteUnreferencedExpressions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnreferencedExpressions", reflect.TypeOf((*MockPolicyService)(nil).DeleteUnreferencedExpressions)) +} diff --git a/pkg/service/model_change_event.go b/pkg/service/model_change_event.go index 4c201673..191a9d23 100644 --- a/pkg/service/model_change_event.go +++ b/pkg/service/model_change_event.go @@ -23,7 +23,7 @@ const ModelChangeEventSVC = "ModelChangeEventSVC" // ModelChangeEventService define the interface for model change type ModelChangeEventService interface { - ListByStatus(status string) ([]types.ModelChangeEvent, error) + ListByStatus(status string, limit int64) ([]types.ModelChangeEvent, error) UpdateStatusByPK(pk int64, status string) error UpdateStatusByModel(eventType, modelType string, modelPK int64, status string) error BulkCreate(modelChangeEvents []types.ModelChangeEvent) error @@ -42,10 +42,12 @@ func NewModelChangeService() ModelChangeEventService { } // ListByStatus ... -func (l *modelChangeEventService) ListByStatus(status string) (modelChangeEvents []types.ModelChangeEvent, err error) { +func (l *modelChangeEventService) ListByStatus( + status string, limit int64, +) (modelChangeEvents []types.ModelChangeEvent, err error) { errorWrapf := errorx.NewLayerFunctionErrorWrapf(ModelChangeEventSVC, "ListByStatus") - dbModelChangeEvents, err := l.manager.ListByStatus(status) + dbModelChangeEvents, err := l.manager.ListByStatus(status, limit) if err != nil { return modelChangeEvents, errorWrapf(err, "ListByStatus(status=%s) fail", status) } diff --git a/pkg/service/policy.go b/pkg/service/policy.go index b4b14436..f0239dd9 100644 --- a/pkg/service/policy.go +++ b/pkg/service/policy.go @@ -36,6 +36,8 @@ const ( expressionTypeCustom int64 = 0 // 自定义的expression类型 expressionTypeTemplate int64 = 1 // 模板的expression类型 + expressionTypeUnreferenced int64 = -1 // 未被引用的模板expression类型 + expressionPKActionWithoutResource = -1 // 操作不关联资源时的expression pk // PolicyTemplateIDCustom template id for custom policy @@ -84,6 +86,9 @@ type PolicyService interface { // for model update HasAnyByActionPK(actionPK int64) (bool, error) + + // for expression clean task + DeleteUnreferencedExpressions() error } type policyService struct { @@ -810,3 +815,34 @@ func (s *policyService) DeleteByActionPK(actionPK int64) error { } return err } + +// DeleteUnquotedExpressions 删除未被引用的expression +func (s *policyService) DeleteUnreferencedExpressions() error { + errorWrapf := errorx.NewLayerFunctionErrorWrapf(PolicySVC, "DeleteUnquotedExpression") + updateAt := time.Now().Unix() - 24*60*60 // 取前一天的时间戳 + + // 1. 更新被引用但是标记为未引用的expression + err := s.expressionManger.ChangeReferencedExpressionTypeBeforeUpdateAt( + expressionTypeUnreferenced, expressionTypeTemplate, updateAt) + if err != nil { + return errorWrapf(err, "expressionManger.ChangeReferencedExpressionTypeBeforeUpdateAt "+ + "fromType=`%d`, toType=`%d`, updateAt=`%d`", + expressionTypeUnreferenced, expressionTypeTemplate, updateAt) + } + + // 2. 
删除标记未被引用的expression + err = s.expressionManger.DeleteUnreferencedExpressionByTypeBeforeUpdateAt(expressionTypeUnreferenced, updateAt) + if err != nil { + return errorWrapf(err, "expressionManger.DeleteByTypeBeforeUpdateAt type=`%d`, updateAt=`%d`", + expressionTypeUnreferenced, updateAt) + } + + // 3. 标记未被引用的expression + err = s.expressionManger.ChangeUnreferencedExpressionType(expressionTypeTemplate, expressionTypeUnreferenced) + if err != nil { + return errorWrapf(err, "expressionManger.ChangeUnreferencedExpressionType fromType=`%d`, toType=`%d`", + expressionTypeTemplate, expressionTypeUnreferenced) + } + + return nil +} diff --git a/pkg/service/policy_test.go b/pkg/service/policy_test.go index 513e3180..492ee286 100644 --- a/pkg/service/policy_test.go +++ b/pkg/service/policy_test.go @@ -14,9 +14,9 @@ import ( "errors" "github.com/TencentBlueKing/gopkg/collection/set" - "github.com/agiledragon/gomonkey" + "github.com/agiledragon/gomonkey/v2" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/database" diff --git a/pkg/service/service_suite_test.go b/pkg/service/service_suite_test.go index 95c355aa..534074e3 100644 --- a/pkg/service/service_suite_test.go +++ b/pkg/service/service_suite_test.go @@ -13,7 +13,7 @@ package service_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/service/subject_group_test.go b/pkg/service/subject_group_test.go index 239bf48a..21dfb38e 100644 --- a/pkg/service/subject_group_test.go +++ b/pkg/service/subject_group_test.go @@ -11,7 +11,7 @@ package service import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" ) var _ = Describe("SubjectService", func() { diff --git a/pkg/service/subject_member_test.go b/pkg/service/subject_member_test.go index 4d4674e1..ea959864 100644 --- a/pkg/service/subject_member_test.go +++ b/pkg/service/subject_member_test.go @@ -14,7 +14,7 @@ import ( "errors" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/database/dao" diff --git a/pkg/service/subject_test.go b/pkg/service/subject_test.go index f0ef9fd6..6a9cf909 100644 --- a/pkg/service/subject_test.go +++ b/pkg/service/subject_test.go @@ -17,7 +17,7 @@ import ( "iam/pkg/service/types" "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) diff --git a/pkg/service/system_config_test.go b/pkg/service/system_config_test.go index bc73d257..d12787a7 100644 --- a/pkg/service/system_config_test.go +++ b/pkg/service/system_config_test.go @@ -12,7 +12,7 @@ package service import ( "github.com/golang/mock/gomock" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/database/sdao" diff --git a/pkg/service/types/action.go b/pkg/service/types/action.go index e54d0b23..b23d792a 100644 --- a/pkg/service/types/action.go +++ b/pkg/service/types/action.go @@ -25,6 +25,11 @@ type ActionResourceType struct { InstanceSelections []map[string]interface{} `json:"instance_selections" structs:"instance_selections"` } +type ActionEnvironment struct { + Type string `json:"type" structs:"type"` + // Operators []string `json:"operators" structs:"operators"` +} + // ReferenceInstanceSelection ... 
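
The ActionEnvironment type just above is what Action.RelatedEnvironments holds; the action service marshals it with jsoniter before writing the new saas_action.related_environments text column and unmarshals it on read. A minimal round-trip sketch; the "period" value is a made-up example, not an environment type defined by this PR:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type ActionEnvironment struct {
	Type string `json:"type"`
}

func main() {
	envs := []ActionEnvironment{{Type: "period"}}

	// BulkCreate/Update marshal the slice to a string before writing the column.
	s, _ := jsoniter.MarshalToString(envs)
	fmt.Println(s) // [{"type":"period"}]

	// Get/ListBySystem unmarshal the column back into Action.RelatedEnvironments.
	var decoded []ActionEnvironment
	_ = jsoniter.UnmarshalFromString(s, &decoded)
	fmt.Println(decoded[0].Type) // period
}
```
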
type ReferenceInstanceSelection struct { System string `json:"system_id" structs:"system_id"` @@ -45,6 +50,7 @@ type Action struct { Version int64 `json:"version" structs:"version"` RelatedResourceTypes []ActionResourceType `json:"related_resource_types" structs:"related_resource_types"` RelatedActions []string `json:"related_actions" structs:"related_actions"` + RelatedEnvironments []ActionEnvironment `json:"related_environments" structs:"related_environments"` } // ThinAction ... diff --git a/pkg/service/types/policy_test.go b/pkg/service/types/policy_test.go index d804dfa0..43fb1b3a 100644 --- a/pkg/service/types/policy_test.go +++ b/pkg/service/types/policy_test.go @@ -11,7 +11,7 @@ package types_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/service/types" diff --git a/pkg/service/types/subject_detail_test.go b/pkg/service/types/subject_detail_test.go index c6f1a08a..0c7f8993 100644 --- a/pkg/service/types/subject_detail_test.go +++ b/pkg/service/types/subject_detail_test.go @@ -13,7 +13,7 @@ package types_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "github.com/vmihailenco/msgpack/v5" @@ -173,7 +173,7 @@ func BenchmarkThinSubjectDetail(b *testing.B) { } bs, _ := msgpack.Marshal(&a) - //fmt.Println("size:", len(bs), err) + // fmt.Println("size:", len(bs), err) var x SubjectDetail for i := 0; i < b.N; i++ { @@ -209,11 +209,11 @@ func BenchmarkThinSubjectDetailCustomEncodeDecode(b *testing.B) { } bs, _ := msgpack.Marshal(&a) - //fmt.Println("size:", len(bs), err) + // fmt.Println("size:", len(bs), err) var x types.SubjectDetail for i := 0; i < b.N; i++ { msgpack.Unmarshal(bs, &x) } - //fmt.Printf("+%v", x) + // fmt.Printf("+%v", x) } diff --git a/pkg/service/types/types_suite_test.go b/pkg/service/types/types_suite_test.go index 7f0a208b..7a5c9cfb 100644 --- a/pkg/service/types/types_suite_test.go +++ b/pkg/service/types/types_suite_test.go @@ -13,7 +13,7 @@ package types_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/util/conv.go b/pkg/util/conv.go index cb8e6c44..a29c679f 100644 --- a/pkg/util/conv.go +++ b/pkg/util/conv.go @@ -21,11 +21,6 @@ func Int64SliceToString(s []int64, sep string) string { return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(s)), sep), "[]") } -// StringToInt64 ... -func StringToInt64(i string) (int64, error) { - return strconv.ParseInt(i, 10, 64) -} - // StringToInt64Slice ... func StringToInt64Slice(s, sep string) ([]int64, error) { if s == "" { diff --git a/pkg/util/conv_test.go b/pkg/util/conv_test.go index ffe30112..457c8507 100644 --- a/pkg/util/conv_test.go +++ b/pkg/util/conv_test.go @@ -15,8 +15,7 @@ import ( "sync" "testing" - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" + . 
"github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/util" @@ -34,21 +33,6 @@ var _ = Describe("Conv", func() { ) }) - Describe("StringToInt64", func() { - - It("ok", func() { - i, err := util.StringToInt64("123") - assert.NoError(GinkgoT(), err) - assert.Equal(GinkgoT(), int64(123), i) - }) - - It("fail", func() { - _, err := util.StringToInt64("abc") - assert.Error(GinkgoT(), err) - }) - - }) - Describe("StringToInt64Slice", func() { DescribeTable("StringToInt64Slice cases", func(expected []int64, willError bool, input string, sep string) { data, err := util.StringToInt64Slice(input, sep) diff --git a/pkg/util/error_test.go b/pkg/util/error_test.go index ce1ae6f9..869c7a45 100644 --- a/pkg/util/error_test.go +++ b/pkg/util/error_test.go @@ -1,7 +1,7 @@ package util_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "iam/pkg/util" ) diff --git a/pkg/util/map_test.go b/pkg/util/map_test.go index 9a69fff6..536be203 100644 --- a/pkg/util/map_test.go +++ b/pkg/util/map_test.go @@ -11,8 +11,7 @@ package util_test import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/util" diff --git a/pkg/util/request_test.go b/pkg/util/request_test.go index 379aa708..b63496b7 100644 --- a/pkg/util/request_test.go +++ b/pkg/util/request_test.go @@ -15,7 +15,7 @@ import ( "net/http" "github.com/gin-gonic/gin" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/util" diff --git a/pkg/util/response_test.go b/pkg/util/response_test.go index e9f042c3..f82ff87e 100644 --- a/pkg/util/response_test.go +++ b/pkg/util/response_test.go @@ -22,7 +22,7 @@ import ( "iam/pkg/util" "github.com/gin-gonic/gin" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" ) @@ -36,15 +36,15 @@ func readResponse(w *httptest.ResponseRecorder) util.Response { var _ = Describe("Response", func() { var c *gin.Context - //var r *gin.Engine + // var r *gin.Engine var w *httptest.ResponseRecorder BeforeEach(func() { w = httptest.NewRecorder() gin.SetMode(gin.ReleaseMode) - //gin.DefaultWriter = ioutil.Discard + // gin.DefaultWriter = ioutil.Discard c, _ = gin.CreateTestContext(w) - //c, r = gin.CreateTestContext(w) - //r.Use(gin.Recovery()) + // c, r = gin.CreateTestContext(w) + // r.Use(gin.Recovery()) }) It("BaseJSONResponse", func() { diff --git a/pkg/util/string_test.go b/pkg/util/string_test.go index d2b570cc..80ba04a1 100644 --- a/pkg/util/string_test.go +++ b/pkg/util/string_test.go @@ -15,8 +15,7 @@ import ( "strconv" "testing" - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/util" diff --git a/pkg/util/testing.go b/pkg/util/testing.go index 3aeca7d5..a3e0f2a7 100644 --- a/pkg/util/testing.go +++ b/pkg/util/testing.go @@ -21,7 +21,7 @@ import ( "strings" "testing" - "github.com/alicebob/miniredis" + "github.com/alicebob/miniredis/v2" "github.com/gin-gonic/gin" "github.com/go-redis/redis/v8" jsoniter "github.com/json-iterator/go" @@ -88,9 +88,9 @@ func SetupRouter() *gin.Engine { r := gin.New() gin.SetMode(gin.ReleaseMode) r.Use(gin.Recovery()) - //r.GET("/ping", func(c *gin.Context) { + // r.GET("/ping", func(c *gin.Context) { // c.String(200, "pong") - //}) + // }) return r } @@ -127,7 +127,7 @@ func CreateTesting500Server() *httptest.Server { // JSONAssertFunc ... 
type JSONAssertFunc func(map[string]interface{}) error -//type JSONAssertFunc func(Response) error +// type JSONAssertFunc func(Response) error // NewJSONAssertFunc ... func NewJSONAssertFunc(t *testing.T, assertFunc JSONAssertFunc) func(res *http.Response, req *http.Request) error { @@ -138,7 +138,7 @@ func NewJSONAssertFunc(t *testing.T, assertFunc JSONAssertFunc) func(res *http.R defer res.Body.Close() var data map[string]interface{} - //var data Response + // var data Response err = json.Unmarshal(body, &data) assert.NoError(t, err, "unmarshal string to json fail") diff --git a/pkg/util/time_test.go b/pkg/util/time_test.go index 3737df67..2fc14daf 100644 --- a/pkg/util/time_test.go +++ b/pkg/util/time_test.go @@ -13,7 +13,7 @@ package util_test import ( "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/util" diff --git a/pkg/util/util_suite_test.go b/pkg/util/util_suite_test.go index a0943f6b..af6b0ae0 100644 --- a/pkg/util/util_suite_test.go +++ b/pkg/util/util_suite_test.go @@ -13,7 +13,7 @@ package util_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/util/validation_test.go b/pkg/util/validation_test.go index 8dcfe0fb..f2d368ff 100644 --- a/pkg/util/validation_test.go +++ b/pkg/util/validation_test.go @@ -16,7 +16,7 @@ import ( "strings" "github.com/go-playground/validator/v10" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" "github.com/stretchr/testify/assert" "iam/pkg/util" @@ -24,7 +24,7 @@ import ( var _ = Describe("Validation", func() { - //Describe("ValidationFieldError", func() { + // Describe("ValidationFieldError", func() { // DescribeTable("ValidationFieldError cases", func(expected string, err util.ValidationFieldError) { // assert.True(GinkgoT(), strings.Contains(err.String(), expected)) // }, @@ -32,7 +32,7 @@ var _ = Describe("Validation", func() { // Entry("slice with 1 value", "1", []int64{1}, ","), // Entry("slice with 3 values", "1,2,3", []int64{1, 2, 3}, ","), // ) - //}) + // }) Describe("ValidationErrorMessage", func() { diff --git a/pkg/version/version.go b/pkg/version/version.go index af66ece4..d88778f4 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -14,5 +14,5 @@ var ( Version = "0.0.0" Commit = "none" BuildTime = "unknown" - GoVersion = "1.16.X" + GoVersion = "1.17.X" ) diff --git a/readme.md b/readme.md index 0bd6dc87..9375bd10 100644 --- a/readme.md +++ b/readme.md @@ -38,6 +38,7 @@ - [TencentBlueKing/bk-iam](https://github.com/TencentBlueKing/bk-iam) - [TencentBlueKing/bk-iam-saas](https://github.com/TencentBlueKing/bk-iam-saas) - [TencentBlueKing/bk-iam-search-engine](https://github.com/TencentBlueKing/bk-iam-search-engine) +- [TencentBlueKing/bk-iam-cli](https://github.com/TencentBlueKing/bk-iam-cli) - [TencentBlueKing/iam-python-sdk](https://github.com/TencentBlueKing/iam-python-sdk) - [TencentBlueKing/iam-go-sdk](https://github.com/TencentBlueKing/iam-go-sdk) - [TencentBlueKing/iam-php-sdk](https://github.com/TencentBlueKing/iam-php-sdk) diff --git a/readme_en.md b/readme_en.md index 20769a5d..7d099533 100644 --- a/readme_en.md +++ b/readme_en.md @@ -33,10 +33,15 @@ BK-IAM is a universal permission control product developed based on ABAC(a power - [release log](release.md) -## SDK +## IAM Repos +- [TencentBlueKing/bk-iam](https://github.com/TencentBlueKing/bk-iam) +- [TencentBlueKing/bk-iam-saas](https://github.com/TencentBlueKing/bk-iam-saas) +- 
[TencentBlueKing/bk-iam-search-engine](https://github.com/TencentBlueKing/bk-iam-search-engine) +- [TencentBlueKing/bk-iam-cli](https://github.com/TencentBlueKing/bk-iam-cli) - [TencentBlueKing/iam-python-sdk](https://github.com/TencentBlueKing/iam-python-sdk) - [TencentBlueKing/iam-go-sdk](https://github.com/TencentBlueKing/iam-go-sdk) +- [TencentBlueKing/iam-php-sdk](https://github.com/TencentBlueKing/iam-php-sdk) ## Support diff --git a/release.md b/release.md index 6bbe3728..f1c19422 100644 --- a/release.md +++ b/release.md @@ -1,8 +1,26 @@ +# 1.10.3 + +- upgrade: release environment attributes + +# 1.10.2 + +- bugfix: API /api/v1/engine/credentials/verify use auth + +# 1.10.1 + +- add: API /api/v1/web//unreferenced-expressions +- upgrade: go version 1.17 + +# 1.10.0 + +- upgrade: permission model supports environment attributes + # 1.9.5 - bugfix: healthz check fail if enable bkauth - bugfix: ModelChangeEvent when action(no policy related) be deleted + # 1.9.4 - add: bkauth support diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/go.mod b/vendor/github.com/DATA-DOG/go-sqlmock/go.mod deleted file mode 100644 index eaf8a5ae..00000000 --- a/vendor/github.com/DATA-DOG/go-sqlmock/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/DATA-DOG/go-sqlmock diff --git a/vendor/github.com/TencentBlueKing/iam-go-sdk/expression/eval/compare.go b/vendor/github.com/TencentBlueKing/iam-go-sdk/expression/eval/compare.go index e6c16a11..8e69920f 100644 --- a/vendor/github.com/TencentBlueKing/iam-go-sdk/expression/eval/compare.go +++ b/vendor/github.com/TencentBlueKing/iam-go-sdk/expression/eval/compare.go @@ -38,7 +38,10 @@ package eval import ( + "encoding/json" + "fmt" "reflect" + "strings" ) // github.com/stretchr/testify/assert/assertion_compare.go @@ -342,6 +345,17 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareEqual, false } +// ValueEqual asserts that the first element is value-equal to the second +// only care about the value, will cast the type to the same type before do compare +// +// assert.ValueEqual(t, 2, 1) +// assert.ValueEqual(t, 2, float64(2.0)) +// assert.ValueEqual(t, "b", "a") +// it's different to the `Equals` which use reflect.DeepEqual will check the type and value are the same +func ValueEqual(e1 interface{}, e2 interface{}) bool { + return compareTwoValues(e1, e2, []CompareType{compareEqual}) +} + // Greater asserts that the first element is greater than the second // // assert.Greater(t, 2, 1) @@ -380,10 +394,177 @@ func LessOrEqual(e1 interface{}, e2 interface{}) bool { return compareTwoValues(e1, e2, []CompareType{compareLess, compareEqual}) } -func compareTwoValues(e1 interface{}, e2 interface{}, allowedComparesResults []CompareType) bool { +// the max precision is int64, so uint64 not supported => or only part of uint64 supported? 
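
The compareTwoValues rework in the vendored iam-go-sdk (above, with the casting helpers that follow) normalizes json.Number and mismatched numeric kinds to a common int64 or float64 before comparing. A minimal sketch of the resulting ValueEqual behaviour, using values invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/TencentBlueKing/iam-go-sdk/expression/eval"
)

func main() {
	// Mixed int/float kinds are cast to float64 before comparing.
	fmt.Println(eval.ValueEqual(2, float64(2.0))) // true

	// json.Number (what a decoded policy attribute often is) is cast to int64 or float64 first.
	fmt.Println(eval.ValueEqual(json.Number("42"), 42)) // true

	// Non-numeric values of different kinds are still not equal.
	fmt.Println(eval.ValueEqual("2", 2)) // false
}
```
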
+ +// uint8 the set of all unsigned 8-bit integers (0 to 255) +// uint16 the set of all unsigned 16-bit integers (0 to 65535) +// uint32 the set of all unsigned 32-bit integers (0 to 4294967295) +// uint64 the set of all unsigned 64-bit integers (0 to 18446744073709551615) +// +// int8 the set of all signed 8-bit integers (-128 to 127) +// int16 the set of all signed 16-bit integers (-32768 to 32767) +// int32 the set of all signed 32-bit integers (-2147483648 to 2147483647) +// int64 the set of all signed 64-bit integers (-9223372036854775808 to 9223372036854775807) + +// float32 the set of all IEEE-754 32-bit floating-point numbers +// float64 the set of all IEEE-754 64-bit floating-point numbers + +func isNumberKind(kind reflect.Kind) bool { + switch kind { + case reflect.Int64, reflect.Float64, reflect.Int, reflect.Float32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int8, reflect.Int16, reflect.Int32: + return true + default: + return false + } +} + +func isFloatKind(kind reflect.Kind) bool { + switch kind { + case reflect.Float64, reflect.Float32: + return true + default: + return false + } +} + +func toInt64(i interface{}) (int64, error) { + switch s := i.(type) { + case int: + return int64(s), nil + case int64: + return s, nil + case int32: + return int64(s), nil + case int16: + return int64(s), nil + case int8: + return int64(s), nil + case uint: + return int64(s), nil + case uint64: + // NOTE: precision lost + return int64(s), nil + case uint32: + return int64(s), nil + case uint16: + return int64(s), nil + case uint8: + return int64(s), nil + // NOTE: only cast between int*, no float32/float64 + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + } +} + +func toFloat64(i interface{}) (float64, error) { + switch s := i.(type) { + case float64: + return s, nil + case float32: + return float64(s), nil + case int: + return float64(s), nil + case int64: + // NOTE: precision lost + return float64(s), nil + case int32: + return float64(s), nil + case int16: + return float64(s), nil + case int8: + return float64(s), nil + case uint: + return float64(s), nil + case uint64: + // NOTE: precision lost + return float64(s), nil + case uint32: + return float64(s), nil + case uint16: + return float64(s), nil + case uint8: + return float64(s), nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + } +} + +func castJsonNumber(i interface{}) (interface{}, reflect.Kind, error) { + n, ok := i.(json.Number) + if !ok { + return nil, reflect.Invalid, fmt.Errorf("cast interface to json.Number fail") + } + + // NOTE: precision lost + if strings.IndexByte(n.String(), '.') != -1 { + value, err := n.Float64() + if err != nil { + return nil, reflect.Invalid, err + } + return value, reflect.Float64, nil + } + + value, err := n.Int64() + if err != nil { + return nil, reflect.Invalid, err + } + return value, reflect.Int64, nil +} + +func compareTwoValues(e1 interface{}, e2 interface{}, allowedComparesResults []CompareType) bool { e1Kind := reflect.ValueOf(e1).Kind() e2Kind := reflect.ValueOf(e2).Kind() + + if e1 != nil && e2 != nil { + // if got json.Number => cast to the int64 or float64 + if reflect.TypeOf(e1).String() == "json.Number" { + newE1, newE1Kind, err := castJsonNumber(e1) + if err == nil { + e1 = newE1 + e1Kind = newE1Kind + } + } + if reflect.TypeOf(e2).String() == "json.Number" { + newE2, newE2Kind, err := castJsonNumber(e2) + if err == nil { + e2 = newE2 + e2Kind = newE2Kind + } + } + + // 
here, we support number types: int64/float64 compare + // check and cast to same type: int64 or float64 and do compare later + // but, here got a precision lost, which may case the eval result wrong + if e1Kind != e2Kind && isNumberKind(e1Kind) && isNumberKind(e2Kind) { + if isFloatKind(e1Kind) || isFloatKind(e2Kind) { + // both cast to float64 + newE1, err := toFloat64(e1) + if err == nil { + e1 = newE1 + e1Kind = reflect.Float64 + } + newE2, err2 := toFloat64(e2) + if err2 == nil { + e2 = newE2 + e2Kind = reflect.Float64 + } + } else { + // both cast to int64 + newE1, err := toInt64(e1) + if err == nil { + e1 = newE1 + e1Kind = reflect.Int64 + } + newE2, err2 := toInt64(e2) + if err2 == nil { + e2 = newE2 + e2Kind = reflect.Int64 + } + } + } + } + if e1Kind != e2Kind { // Elements should be the same type return false @@ -397,7 +578,7 @@ func compareTwoValues(e1 interface{}, e2 interface{}, allowedComparesResults []C if !containsValue(allowedComparesResults, compareResult) { return false - //return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + // return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) } return true diff --git a/vendor/github.com/agiledragon/gomonkey/LICENSE b/vendor/github.com/agiledragon/gomonkey/v2/LICENSE similarity index 100% rename from vendor/github.com/agiledragon/gomonkey/LICENSE rename to vendor/github.com/agiledragon/gomonkey/v2/LICENSE diff --git a/vendor/github.com/agiledragon/gomonkey/README.md b/vendor/github.com/agiledragon/gomonkey/v2/README.md similarity index 65% rename from vendor/github.com/agiledragon/gomonkey/README.md rename to vendor/github.com/agiledragon/gomonkey/v2/README.md index 04d9e73c..ee888b4c 100644 --- a/vendor/github.com/agiledragon/gomonkey/README.md +++ b/vendor/github.com/agiledragon/gomonkey/v2/README.md @@ -1,11 +1,12 @@ # gomonkey -gomonkey is a library to make monkey patching in unit tests easy. +gomonkey is a library to make monkey patching in unit tests easy, and the core idea of monkey patching comes from [Bouke](https://github.com/bouk), you can read [this blogpost](https://bou.ke/blog/monkey-patching-in-go/) for an explanation on how it works. ## Features + support a patch for a function -+ support a patch for a member method ++ support a patch for a public member method ++ support a patch for a private member method + support a patch for a interface + support a patch for a function variable + support a patch for a global variable @@ -16,21 +17,33 @@ gomonkey is a library to make monkey patching in unit tests easy. ## Notes + gomonkey fails to patch a function or a member method if inlining is enabled, please running your tests with inlining disabled by adding the command line argument that is `-gcflags=-l`(below go1.10) or `-gcflags=all=-l`(go1.10 and above). -+ gomonkey should work on any amd64 system. + A panic may happen when a goroutine is patching a function or a member method that is visited by another goroutine at the same time. That is to say, gomonkey is not threadsafe. -+ go1.6 version of the reflection mechanism supports the query of private member methods, but go1.7 and above does not support it. However, all versions of the reflection mechanism support the query of private functions, so gomonkey will trigger a `panic` for only patching a private member method when go1.7 and above is used. 
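
The gomonkey v2 upgrade vendored below adds ApplyPrivateMethod (see patch.go further down in this diff). A minimal usage sketch, assuming the usual gomonkey convention that the replacement function takes the receiver as its first argument; the worker type and return values are invented for illustration, and tests must still run with -gcflags=all=-l so the target is not inlined:

```go
package demo_test

import (
	"reflect"
	"testing"

	"github.com/agiledragon/gomonkey/v2"
	"github.com/stretchr/testify/assert"
)

type worker struct{}

//go:noinline
func (w *worker) secret() int { return 1 }

func (w *worker) Double() int { return w.secret() * 2 }

func TestPatchPrivateMethod(t *testing.T) {
	// Patch the unexported method; the double's first parameter stands in for the receiver.
	patches := gomonkey.ApplyPrivateMethod(reflect.TypeOf(&worker{}), "secret", func(_ *worker) int {
		return 21
	})
	defer patches.Reset()

	assert.Equal(t, 42, (&worker{}).Double())
}
```
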
- ## Supported Platform: - MAC OS X amd64 - Linux amd64 - Windows amd64 +- MAC OS X arm64 +- Linux arm64 +- Windows arm64 ## Installation +- below v2.1.0, for example v2.0.2 +```go +$ go get github.com/agiledragon/gomonkey@v2.0.2 +``` +- v2.1.0 and above, for example v2.2.0 ```go -$ go get github.com/agiledragon/gomonkey +$ go get github.com/agiledragon/gomonkey/v2@v2.2.0 ``` + +## Test Method +```go +$ cd test +$ go test -gcflags=all=-l +``` + ## Using gomonkey Please refer to the test cases as idioms, very complete and detailed. diff --git a/vendor/github.com/agiledragon/gomonkey/v2/creflect/type.go b/vendor/github.com/agiledragon/gomonkey/v2/creflect/type.go new file mode 100644 index 00000000..048319c9 --- /dev/null +++ b/vendor/github.com/agiledragon/gomonkey/v2/creflect/type.go @@ -0,0 +1,194 @@ +// Customized reflect package for gomonkey,copy most code from go/src/reflect/type.go + +package creflect + +import ( + "reflect" + "unsafe" +) + +// rtype is the common implementation of most values. +// rtype must be kept in sync with ../runtime/type.go:/^type._type. +type rtype struct { + size uintptr + ptrdata uintptr // number of bytes in the type that can contain pointers + hash uint32 // hash of type; avoids computation in hash tables + tflag tflag // extra type information flags + align uint8 // alignment of variable with this type + fieldAlign uint8 // alignment of struct field with this type + kind uint8 // enumeration for C + // function for comparing objects of this type + // (ptr to object A, ptr to object B) -> ==? + equal func(unsafe.Pointer, unsafe.Pointer) bool + gcdata *byte // garbage collection data + str nameOff // string form + ptrToThis typeOff // type for pointer to this type, may be zero +} + +func Create(t reflect.Type) *rtype { + i := *(*funcValue)(unsafe.Pointer(&t)) + r := (*rtype)(i.p) + return r +} + +type funcValue struct { + _ uintptr + p unsafe.Pointer +} +func funcPointer(v reflect.Method, ok bool) (unsafe.Pointer, bool) { + return (*funcValue)(unsafe.Pointer(&v.Func)).p, ok +} +func MethodByName(r reflect.Type, name string) (fn unsafe.Pointer, ok bool) { + t := Create(r) + if r.Kind() == reflect.Interface { + return funcPointer(r.MethodByName(name)) + } + ut := t.uncommon(r) + if ut == nil { + return nil, false + } + + for _, p := range ut.methods() { + if t.nameOff(p.name).name() == name { + return t.Method(p), true + } + } + return nil, false +} + +func (t *rtype) Method(p method) (fn unsafe.Pointer) { + tfn := t.textOff(p.tfn) + fn = unsafe.Pointer(&tfn) + return +} + +type tflag uint8 +type nameOff int32 // offset to a name +type typeOff int32 // offset to an *rtype +type textOff int32 // offset from top of text section + +//go:linkname resolveTextOff reflect.resolveTextOff +func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + +func (t *rtype) textOff(off textOff) unsafe.Pointer { + return resolveTextOff(unsafe.Pointer(t), int32(off)) +} + +//go:linkname resolveNameOff reflect.resolveNameOff +func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer + +func (t *rtype) nameOff(off nameOff) name { + return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} +} + +const ( + tflagUncommon tflag = 1 << 0 +) +// uncommonType is present only for defined types or types with methods +type uncommonType struct { + pkgPath nameOff // import path; empty for built-in types like int, string + mcount uint16 // number of methods + xcount uint16 // number of exported methods + moff uint32 // offset from this uncommontype to 
[mcount]method + _ uint32 // unused +} + +// ptrType represents a pointer type. +type ptrType struct { + rtype + elem *rtype // pointer element (pointed at) type +} + +// funcType represents a function type. +type funcType struct { + rtype + inCount uint16 + outCount uint16 // top bit is set if last input parameter is ... +} + +func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// interfaceType represents an interface type. +type interfaceType struct { + rtype + pkgPath name // import path + methods []imethod // sorted by hash +} + +type imethod struct { + name nameOff // name of method + typ typeOff // .(*FuncType) underneath +} + +// name is an encoded type name with optional extra data. +type name struct { + bytes *byte +} + +type String struct { + Data unsafe.Pointer + Len int +} + +func (n name) name() (s string) { + if n.bytes == nil { + return + } + b := (*[4]byte)(unsafe.Pointer(n.bytes)) + + hdr := (*String)(unsafe.Pointer(&s)) + hdr.Data = unsafe.Pointer(&b[3]) + hdr.Len = int(b[1])<<8 | int(b[2]) + return s +} + +func (t *rtype) uncommon(r reflect.Type) *uncommonType { + if t.tflag&tflagUncommon == 0 { + return nil + } + switch r.Kind() { + case reflect.Ptr: + type u struct { + ptrType + u uncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case reflect.Func: + type u struct { + funcType + u uncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case reflect.Interface: + type u struct { + interfaceType + u uncommonType + } + return &(*u)(unsafe.Pointer(t)).u + case reflect.Struct: + type u struct { + interfaceType + u uncommonType + } + return &(*u)(unsafe.Pointer(t)).u + default: + return nil + } +} + +// Method on non-interface type +type method struct { + name nameOff // name of method + mtyp typeOff // method type (without receiver) + ifn textOff // fn used in interface call (one-word receiver) + tfn textOff // fn used for normal method call +} + +func (t *uncommonType) methods() []method { + if t.mcount == 0 { + return nil + } + return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount] +} \ No newline at end of file diff --git a/vendor/github.com/agiledragon/gomonkey/jmp_amd64.go b/vendor/github.com/agiledragon/gomonkey/v2/jmp_amd64.go similarity index 100% rename from vendor/github.com/agiledragon/gomonkey/jmp_amd64.go rename to vendor/github.com/agiledragon/gomonkey/v2/jmp_amd64.go diff --git a/vendor/github.com/agiledragon/gomonkey/v2/jmp_arm64.go b/vendor/github.com/agiledragon/gomonkey/v2/jmp_arm64.go new file mode 100644 index 00000000..add3fd99 --- /dev/null +++ b/vendor/github.com/agiledragon/gomonkey/v2/jmp_arm64.go @@ -0,0 +1,34 @@ +package gomonkey + +import "unsafe" + +func buildJmpDirective(double uintptr) []byte { + res := make([]byte, 0, 24) + d0d1 := double & 0xFFFF + d2d3 := double >> 16 & 0xFFFF + d4d5 := double >> 32 & 0xFFFF + d6d7 := double >> 48 & 0xFFFF + + res = append(res, movImm(0B10, 0, d0d1)...) // MOVZ x26, double[16:0] + res = append(res, movImm(0B11, 1, d2d3)...) // MOVK x26, double[32:16] + res = append(res, movImm(0B11, 2, d4d5)...) // MOVK x26, double[48:32] + res = append(res, movImm(0B11, 3, d6d7)...) // MOVK x26, double[64:48] + res = append(res, []byte{0x4A, 0x03, 0x40, 0xF9}...) // LDR x10, [x26] + res = append(res, []byte{0x40, 0x01, 0x1F, 0xD6}...) 
// BR x10 + + return res +} + +func movImm(opc, shift int, val uintptr) []byte { + var m uint32 = 26 // rd + m |= uint32(val) << 5 // imm16 + m |= uint32(shift&3) << 21 // hw + m |= 0b100101 << 23 // const + m |= uint32(opc&0x3) << 29 // opc + m |= 0b1 << 31 // sf + + res := make([]byte, 4) + *(*uint32)(unsafe.Pointer(&res[0])) = m + + return res +} diff --git a/vendor/github.com/agiledragon/gomonkey/modify_binary_darwin.go b/vendor/github.com/agiledragon/gomonkey/v2/modify_binary_darwin.go similarity index 100% rename from vendor/github.com/agiledragon/gomonkey/modify_binary_darwin.go rename to vendor/github.com/agiledragon/gomonkey/v2/modify_binary_darwin.go diff --git a/vendor/github.com/agiledragon/gomonkey/modify_binary_linux.go b/vendor/github.com/agiledragon/gomonkey/v2/modify_binary_linux.go similarity index 100% rename from vendor/github.com/agiledragon/gomonkey/modify_binary_linux.go rename to vendor/github.com/agiledragon/gomonkey/v2/modify_binary_linux.go diff --git a/vendor/github.com/agiledragon/gomonkey/modify_binary_windows.go b/vendor/github.com/agiledragon/gomonkey/v2/modify_binary_windows.go similarity index 100% rename from vendor/github.com/agiledragon/gomonkey/modify_binary_windows.go rename to vendor/github.com/agiledragon/gomonkey/v2/modify_binary_windows.go diff --git a/vendor/github.com/agiledragon/gomonkey/patch.go b/vendor/github.com/agiledragon/gomonkey/v2/patch.go similarity index 80% rename from vendor/github.com/agiledragon/gomonkey/patch.go rename to vendor/github.com/agiledragon/gomonkey/v2/patch.go index 99df3885..60c10945 100644 --- a/vendor/github.com/agiledragon/gomonkey/patch.go +++ b/vendor/github.com/agiledragon/gomonkey/v2/patch.go @@ -2,13 +2,14 @@ package gomonkey import ( "fmt" + "github.com/agiledragon/gomonkey/v2/creflect" "reflect" "syscall" "unsafe" ) type Patches struct { - originals map[reflect.Value][]byte + originals map[uintptr][]byte values map[reflect.Value]reflect.Value valueHolders map[reflect.Value]reflect.Value } @@ -27,6 +28,10 @@ func ApplyMethod(target reflect.Type, methodName string, double interface{}) *Pa return create().ApplyMethod(target, methodName, double) } +func ApplyPrivateMethod(target reflect.Type, methodName string, double interface{}) *Patches { + return create().ApplyPrivateMethod(target, methodName, double) +} + func ApplyGlobalVar(target, double interface{}) *Patches { return create().ApplyGlobalVar(target, double) } @@ -48,7 +53,7 @@ func ApplyFuncVarSeq(target interface{}, outputs []OutputCell) *Patches { } func create() *Patches { - return &Patches{originals: make(map[reflect.Value][]byte), values: make(map[reflect.Value]reflect.Value), valueHolders: make(map[reflect.Value]reflect.Value)} + return &Patches{originals: make(map[uintptr][]byte), values: make(map[reflect.Value]reflect.Value), valueHolders: make(map[reflect.Value]reflect.Value)} } func NewPatches() *Patches { @@ -70,6 +75,15 @@ func (this *Patches) ApplyMethod(target reflect.Type, methodName string, double return this.ApplyCore(m.Func, d) } +func (this *Patches) ApplyPrivateMethod(target reflect.Type, methodName string, double interface{}) *Patches { + m, ok := creflect.MethodByName(target, methodName) + if !ok { + panic("retrieve method by name failed") + } + d := reflect.ValueOf(double) + return this.ApplyCoreOnlyForPrivateMethod(m, d) +} + func (this *Patches) ApplyGlobalVar(target, double interface{}) *Patches { t := reflect.ValueOf(target) if t.Type().Kind() != reflect.Ptr { @@ -124,7 +138,7 @@ func (this *Patches) ApplyFuncVarSeq(target 
interface{}, outputs []OutputCell) * func (this *Patches) Reset() { for target, bytes := range this.originals { - modifyBinary(*(*uintptr)(getPointer(target)), bytes) + modifyBinary(target, bytes) delete(this.originals, target) } @@ -135,13 +149,28 @@ func (this *Patches) Reset() { func (this *Patches) ApplyCore(target, double reflect.Value) *Patches { this.check(target, double) - if _, ok := this.originals[target]; ok { + assTarget := *(*uintptr)(getPointer(target)) + if _, ok := this.originals[assTarget]; ok { panic("patch has been existed") } this.valueHolders[double] = double - original := replace(*(*uintptr)(getPointer(target)), uintptr(getPointer(double))) - this.originals[target] = original + original := replace(assTarget, uintptr(getPointer(double))) + this.originals[assTarget] = original + return this +} + +func (this *Patches) ApplyCoreOnlyForPrivateMethod(target unsafe.Pointer, double reflect.Value) *Patches { + if double.Kind() != reflect.Func { + panic("double is not a func") + } + assTarget := *(*uintptr)(target) + if _, ok := this.originals[assTarget]; ok { + panic("patch has been existed") + } + this.valueHolders[double] = double + original := replace(assTarget, uintptr(getPointer(double))) + this.originals[assTarget] = original return this } diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod deleted file mode 100644 index a70670ae..00000000 --- a/vendor/github.com/alecthomas/template/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/alecthomas/template diff --git a/vendor/github.com/alicebob/miniredis/.gitignore b/vendor/github.com/alicebob/miniredis/.gitignore deleted file mode 100644 index a6fadca4..00000000 --- a/vendor/github.com/alicebob/miniredis/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/integration/redis_src/ diff --git a/vendor/github.com/alicebob/miniredis/.travis.yml b/vendor/github.com/alicebob/miniredis/.travis.yml deleted file mode 100644 index d9122d17..00000000 --- a/vendor/github.com/alicebob/miniredis/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -before_script: - - (cd ./integration && ./get_redis.sh) - -install: go get -t - -script: make test testrace int - -sudo: false - -go: - - 1.11 diff --git a/vendor/github.com/alicebob/miniredis/cmd_connection.go b/vendor/github.com/alicebob/miniredis/cmd_connection.go deleted file mode 100644 index ca648f4b..00000000 --- a/vendor/github.com/alicebob/miniredis/cmd_connection.go +++ /dev/null @@ -1,96 +0,0 @@ -// Commands from https://redis.io/commands#connection - -package miniredis - -import ( - "strconv" - - "github.com/alicebob/miniredis/server" -) - -func commandsConnection(m *Miniredis) { - m.srv.Register("AUTH", m.cmdAuth) - m.srv.Register("ECHO", m.cmdEcho) - m.srv.Register("PING", m.cmdPing) - m.srv.Register("SELECT", m.cmdSelect) - m.srv.Register("QUIT", m.cmdQuit) -} - -// PING -func (m *Miniredis) cmdPing(c *server.Peer, cmd string, args []string) { - if !m.handleAuth(c) { - return - } - c.WriteInline("PONG") -} - -// AUTH -func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) { - if len(args) != 1 { - setDirty(c) - c.WriteError(errWrongNumber(cmd)) - return - } - pw := args[0] - - m.Lock() - defer m.Unlock() - if m.password == "" { - c.WriteError("ERR Client sent AUTH, but no password is set") - return - } - if m.password != pw { - c.WriteError("ERR invalid password") - return - } - - setAuthenticated(c) - c.WriteOK() -} - -// ECHO -func (m *Miniredis) cmdEcho(c *server.Peer, cmd string, args []string) { - if 
len(args) != 1 { - setDirty(c) - c.WriteError(errWrongNumber(cmd)) - return - } - if !m.handleAuth(c) { - return - } - - msg := args[0] - c.WriteBulk(msg) -} - -// SELECT -func (m *Miniredis) cmdSelect(c *server.Peer, cmd string, args []string) { - if len(args) != 1 { - setDirty(c) - c.WriteError(errWrongNumber(cmd)) - return - } - if !m.handleAuth(c) { - return - } - - id, err := strconv.Atoi(args[0]) - if err != nil { - id = 0 - } - - m.Lock() - defer m.Unlock() - - ctx := getCtx(c) - ctx.selectedDB = id - - c.WriteOK() -} - -// QUIT -func (m *Miniredis) cmdQuit(c *server.Peer, cmd string, args []string) { - // QUIT isn't transactionfied and accepts any arguments. - c.WriteOK() - c.Close() -} diff --git a/vendor/github.com/alicebob/miniredis/miniredis.go b/vendor/github.com/alicebob/miniredis/miniredis.go deleted file mode 100644 index 0688bdfe..00000000 --- a/vendor/github.com/alicebob/miniredis/miniredis.go +++ /dev/null @@ -1,373 +0,0 @@ -// Package miniredis is a pure Go Redis test server, for use in Go unittests. -// There are no dependencies on system binaries, and every server you start -// will be empty. -// -// Start a server with `s, err := miniredis.Run()`. -// Stop it with `defer s.Close()`. -// -// Point your Redis client to `s.Addr()` or `s.Host(), s.Port()`. -// -// Set keys directly via s.Set(...) and similar commands, or use a Redis client. -// -// For direct use you can select a Redis database with either `s.Select(12); -// s.Get("foo")` or `s.DB(12).Get("foo")`. -// -package miniredis - -import ( - "fmt" - "net" - "strconv" - "sync" - "time" - - redigo "github.com/gomodule/redigo/redis" - - "github.com/alicebob/miniredis/server" -) - -type hashKey map[string]string -type listKey []string -type setKey map[string]struct{} - -// RedisDB holds a single (numbered) Redis database. -type RedisDB struct { - master *sync.Mutex // pointer to the lock in Miniredis - id int // db id - keys map[string]string // Master map of keys with their type - stringKeys map[string]string // GET/SET &c. keys - hashKeys map[string]hashKey // MGET/MSET &c. keys - listKeys map[string]listKey // LPUSH &c. keys - setKeys map[string]setKey // SADD &c. keys - sortedsetKeys map[string]sortedSet // ZADD &c. keys - ttl map[string]time.Duration // effective TTL values - keyVersion map[string]uint // used to watch values -} - -// Miniredis is a Redis server implementation. -type Miniredis struct { - sync.Mutex - srv *server.Server - port int - password string - dbs map[int]*RedisDB - selectedDB int // DB id used in the direct Get(), Set() &c. - scripts map[string]string // sha1 -> lua src - signal *sync.Cond - now time.Time // used to make a duration from EXPIREAT. time.Now() if not set. -} - -type txCmd func(*server.Peer, *connCtx) - -// database id + key combo -type dbKey struct { - db int - key string -} - -// connCtx has all state for a single connection. -type connCtx struct { - selectedDB int // selected DB - authenticated bool // auth enabled and a valid AUTH seen - transaction []txCmd // transaction callbacks. Or nil. - dirtyTransaction bool // any error during QUEUEing. - watch map[dbKey]uint // WATCHed keys. -} - -// NewMiniRedis makes a new, non-started, Miniredis object. 
-func NewMiniRedis() *Miniredis { - m := Miniredis{ - dbs: map[int]*RedisDB{}, - scripts: map[string]string{}, - } - m.signal = sync.NewCond(&m) - return &m -} - -func newRedisDB(id int, l *sync.Mutex) RedisDB { - return RedisDB{ - id: id, - master: l, - keys: map[string]string{}, - stringKeys: map[string]string{}, - hashKeys: map[string]hashKey{}, - listKeys: map[string]listKey{}, - setKeys: map[string]setKey{}, - sortedsetKeys: map[string]sortedSet{}, - ttl: map[string]time.Duration{}, - keyVersion: map[string]uint{}, - } -} - -// Run creates and Start()s a Miniredis. -func Run() (*Miniredis, error) { - m := NewMiniRedis() - return m, m.Start() -} - -// Start starts a server. It listens on a random port on localhost. See also -// Addr(). -func (m *Miniredis) Start() error { - s, err := server.NewServer(fmt.Sprintf("127.0.0.1:%d", m.port)) - if err != nil { - return err - } - return m.start(s) -} - -// StartAddr runs miniredis with a given addr. Examples: "127.0.0.1:6379", -// ":6379", or "127.0.0.1:0" -func (m *Miniredis) StartAddr(addr string) error { - s, err := server.NewServer(addr) - if err != nil { - return err - } - return m.start(s) -} - -func (m *Miniredis) start(s *server.Server) error { - m.Lock() - defer m.Unlock() - m.srv = s - m.port = s.Addr().Port - - commandsConnection(m) - commandsGeneric(m) - commandsServer(m) - commandsString(m) - commandsHash(m) - commandsList(m) - commandsSet(m) - commandsSortedSet(m) - commandsTransaction(m) - commandsScripting(m) - - return nil -} - -// Restart restarts a Close()d server on the same port. Values will be -// preserved. -func (m *Miniredis) Restart() error { - return m.Start() -} - -// Close shuts down a Miniredis. -func (m *Miniredis) Close() { - m.Lock() - defer m.Unlock() - if m.srv == nil { - return - } - m.srv.Close() - m.srv = nil -} - -// RequireAuth makes every connection need to AUTH first. Disable again by -// setting an empty string. -func (m *Miniredis) RequireAuth(pw string) { - m.Lock() - defer m.Unlock() - m.password = pw -} - -// DB returns a DB by ID. -func (m *Miniredis) DB(i int) *RedisDB { - m.Lock() - defer m.Unlock() - return m.db(i) -} - -// get DB. No locks! -func (m *Miniredis) db(i int) *RedisDB { - if db, ok := m.dbs[i]; ok { - return db - } - db := newRedisDB(i, &m.Mutex) // the DB has our lock. - m.dbs[i] = &db - return &db -} - -// Addr returns '127.0.0.1:12345'. Can be given to a Dial(). See also Host() -// and Port(), which return the same things. -func (m *Miniredis) Addr() string { - m.Lock() - defer m.Unlock() - return m.srv.Addr().String() -} - -// Host returns the host part of Addr(). -func (m *Miniredis) Host() string { - m.Lock() - defer m.Unlock() - return m.srv.Addr().IP.String() -} - -// Port returns the (random) port part of Addr(). -func (m *Miniredis) Port() string { - m.Lock() - defer m.Unlock() - return strconv.Itoa(m.srv.Addr().Port) -} - -// CommandCount returns the number of processed commands. -func (m *Miniredis) CommandCount() int { - m.Lock() - defer m.Unlock() - return int(m.srv.TotalCommands()) -} - -// CurrentConnectionCount returns the number of currently connected clients. -func (m *Miniredis) CurrentConnectionCount() int { - m.Lock() - defer m.Unlock() - return m.srv.ClientsLen() -} - -// TotalConnectionCount returns the number of client connections since server start. -func (m *Miniredis) TotalConnectionCount() int { - m.Lock() - defer m.Unlock() - return int(m.srv.TotalConnections()) -} - -// FastForward decreases all TTLs by the given duration. 
All TTLs <= 0 will be -// expired. -func (m *Miniredis) FastForward(duration time.Duration) { - m.Lock() - defer m.Unlock() - for _, db := range m.dbs { - db.fastForward(duration) - } -} - -// redigo returns a redigo.Conn, connected using net.Pipe -func (m *Miniredis) redigo() redigo.Conn { - c1, c2 := net.Pipe() - m.srv.ServeConn(c1) - c := redigo.NewConn(c2, 0, 0) - if m.password != "" { - if _, err := c.Do("AUTH", m.password); err != nil { - // ? - } - } - return c -} - -// Dump returns a text version of the selected DB, usable for debugging. -func (m *Miniredis) Dump() string { - m.Lock() - defer m.Unlock() - - var ( - maxLen = 60 - indent = " " - db = m.db(m.selectedDB) - r = "" - v = func(s string) string { - suffix := "" - if len(s) > maxLen { - suffix = fmt.Sprintf("...(%d)", len(s)) - s = s[:maxLen-len(suffix)] - } - return fmt.Sprintf("%q%s", s, suffix) - } - ) - for _, k := range db.allKeys() { - r += fmt.Sprintf("- %s\n", k) - t := db.t(k) - switch t { - case "string": - r += fmt.Sprintf("%s%s\n", indent, v(db.stringKeys[k])) - case "hash": - for _, hk := range db.hashFields(k) { - r += fmt.Sprintf("%s%s: %s\n", indent, hk, v(db.hashGet(k, hk))) - } - case "list": - for _, lk := range db.listKeys[k] { - r += fmt.Sprintf("%s%s\n", indent, v(lk)) - } - case "set": - for _, mk := range db.setMembers(k) { - r += fmt.Sprintf("%s%s\n", indent, v(mk)) - } - case "zset": - for _, el := range db.ssetElements(k) { - r += fmt.Sprintf("%s%f: %s\n", indent, el.score, v(el.member)) - } - default: - r += fmt.Sprintf("%s(a %s, fixme!)\n", indent, t) - } - } - return r -} - -// SetTime sets the time against which EXPIREAT values are compared. EXPIREAT -// will use time.Now() if this is not set. -func (m *Miniredis) SetTime(t time.Time) { - m.Lock() - defer m.Unlock() - m.now = t -} - -// handleAuth returns false if connection has no access. It sends the reply. -func (m *Miniredis) handleAuth(c *server.Peer) bool { - m.Lock() - defer m.Unlock() - if m.password == "" { - return true - } - if !getCtx(c).authenticated { - c.WriteError("NOAUTH Authentication required.") - return false - } - return true -} - -func getCtx(c *server.Peer) *connCtx { - if c.Ctx == nil { - c.Ctx = &connCtx{} - } - return c.Ctx.(*connCtx) -} - -func startTx(ctx *connCtx) { - ctx.transaction = []txCmd{} - ctx.dirtyTransaction = false -} - -func stopTx(ctx *connCtx) { - ctx.transaction = nil - unwatch(ctx) -} - -func inTx(ctx *connCtx) bool { - return ctx.transaction != nil -} - -func addTxCmd(ctx *connCtx, cb txCmd) { - ctx.transaction = append(ctx.transaction, cb) -} - -func watch(db *RedisDB, ctx *connCtx, key string) { - if ctx.watch == nil { - ctx.watch = map[dbKey]uint{} - } - ctx.watch[dbKey{db: db.id, key: key}] = db.keyVersion[key] // Can be 0. -} - -func unwatch(ctx *connCtx) { - ctx.watch = nil -} - -// setDirty can be called even when not in an tx. Is an no-op then. -func setDirty(c *server.Peer) { - if c.Ctx == nil { - // No transaction. Not relevant. 
- return - } - getCtx(c).dirtyTransaction = true -} - -func setAuthenticated(c *server.Peer) { - getCtx(c).authenticated = true -} diff --git a/vendor/github.com/alicebob/miniredis/redis.go b/vendor/github.com/alicebob/miniredis/redis.go deleted file mode 100644 index 49ff7bc3..00000000 --- a/vendor/github.com/alicebob/miniredis/redis.go +++ /dev/null @@ -1,208 +0,0 @@ -package miniredis - -import ( - "fmt" - "math" - "strings" - "sync" - "time" - - "github.com/alicebob/miniredis/server" -) - -const ( - msgWrongType = "WRONGTYPE Operation against a key holding the wrong kind of value" - msgInvalidInt = "ERR value is not an integer or out of range" - msgInvalidFloat = "ERR value is not a valid float" - msgInvalidMinMax = "ERR min or max is not a float" - msgInvalidRangeItem = "ERR min or max not valid string range item" - msgInvalidTimeout = "ERR timeout is not an integer or out of range" - msgSyntaxError = "ERR syntax error" - msgKeyNotFound = "ERR no such key" - msgOutOfRange = "ERR index out of range" - msgInvalidCursor = "ERR invalid cursor" - msgXXandNX = "ERR XX and NX options at the same time are not compatible" - msgNegTimeout = "ERR timeout is negative" - msgInvalidSETime = "ERR invalid expire time in set" - msgInvalidSETEXTime = "ERR invalid expire time in setex" - msgInvalidPSETEXTime = "ERR invalid expire time in psetex" - msgInvalidKeysNumber = "ERR Number of keys can't be greater than number of args" - msgNegativeKeysNumber = "ERR Number of keys can't be negative" - msgFScriptUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP." - msgSingleElementPair = "ERR INCR option supports a single increment-element pair" - msgNoScriptFound = "NOSCRIPT No matching script. Please use EVAL." -) - -func errWrongNumber(cmd string) string { - return fmt.Sprintf("ERR wrong number of arguments for '%s' command", strings.ToLower(cmd)) -} - -func errLuaParseError(err error) string { - return fmt.Sprintf("ERR Error compiling script (new function): %s", err.Error()) -} - -// withTx wraps the non-argument-checking part of command handling code in -// transaction logic. -func withTx( - m *Miniredis, - c *server.Peer, - cb txCmd, -) { - ctx := getCtx(c) - if inTx(ctx) { - addTxCmd(ctx, cb) - c.WriteInline("QUEUED") - return - } - m.Lock() - cb(c, ctx) - // done, wake up anyone who waits on anything. - m.signal.Broadcast() - m.Unlock() -} - -// blockCmd is executed returns whether it is done -type blockCmd func(*server.Peer, *connCtx) bool - -// blocking keeps trying a command until the callback returns true. Calls -// onTimeout after the timeout (or when we call this in a transaction). 
-func blocking( - m *Miniredis, - c *server.Peer, - timeout time.Duration, - cb blockCmd, - onTimeout func(*server.Peer), -) { - var ( - ctx = getCtx(c) - dl *time.Timer - dlc <-chan time.Time - ) - if inTx(ctx) { - addTxCmd(ctx, func(c *server.Peer, ctx *connCtx) { - if !cb(c, ctx) { - onTimeout(c) - } - }) - c.WriteInline("QUEUED") - return - } - if timeout != 0 { - dl = time.NewTimer(timeout) - defer dl.Stop() - dlc = dl.C - } - - m.Lock() - defer m.Unlock() - for { - done := cb(c, ctx) - if done { - return - } - // there is no cond.WaitTimeout(), so hence the the goroutine to wait - // for a timeout - var ( - wg sync.WaitGroup - wakeup = make(chan struct{}, 1) - ) - wg.Add(1) - go func() { - m.signal.Wait() - wakeup <- struct{}{} - wg.Done() - }() - select { - case <-wakeup: - case <-dlc: - onTimeout(c) - m.signal.Broadcast() // to kill the wakeup go routine - wg.Wait() - return - } - wg.Wait() - } -} - -// formatFloat formats a float the way redis does (sort-of) -func formatFloat(v float64) string { - // Format with %f and strip trailing 0s. This is the most like Redis does - // it :( - // .12 is the magic number where most output is the same as Redis. - if math.IsInf(v, +1) { - return "inf" - } - if math.IsInf(v, -1) { - return "-inf" - } - sv := fmt.Sprintf("%.12f", v) - for strings.Contains(sv, ".") { - if sv[len(sv)-1] != '0' { - break - } - // Remove trailing 0s. - sv = sv[:len(sv)-1] - // Ends with a '.'. - if sv[len(sv)-1] == '.' { - sv = sv[:len(sv)-1] - break - } - } - return sv -} - -// redisRange gives Go offsets for something l long with start/end in -// Redis semantics. Both start and end can be negative. -// Used for string range and list range things. -// The results can be used as: v[start:end] -// Note that GETRANGE (on a string key) never returns an empty string when end -// is a large negative number. -func redisRange(l, start, end int, stringSymantics bool) (int, int) { - if start < 0 { - start = l + start - if start < 0 { - start = 0 - } - } - if start > l { - start = l - } - - if end < 0 { - end = l + end - if end < 0 { - end = -1 - if stringSymantics { - end = 0 - } - } - } - end++ // end argument is inclusive in Redis. - if end > l { - end = l - } - - if end < start { - return 0, 0 - } - return start, end -} - -// matchKeys filters only matching keys. -// Will return an empty list on invalid match expression. -func matchKeys(keys []string, match string) []string { - re := patternRE(match) - if re == nil { - // Special case, the given pattern won't match anything / is - // invalid. 
- return nil - } - res := []string{} - for _, k := range keys { - if !re.MatchString(k) { - continue - } - res = append(res, k) - } - return res -} diff --git a/vendor/github.com/alicebob/miniredis/server/server.go b/vendor/github.com/alicebob/miniredis/server/server.go deleted file mode 100644 index 1796453d..00000000 --- a/vendor/github.com/alicebob/miniredis/server/server.go +++ /dev/null @@ -1,242 +0,0 @@ -package server - -import ( - "bufio" - "fmt" - "net" - "strings" - "sync" - "unicode" -) - -func errUnknownCommand(cmd string, args []string) string { - s := fmt.Sprintf("ERR unknown command `%s`, with args beginning with: ", cmd) - if len(args) > 20 { - args = args[:20] - } - for _, a := range args { - s += fmt.Sprintf("`%s`, ", a) - } - return s -} - -// Cmd is what Register expects -type Cmd func(c *Peer, cmd string, args []string) - -// Server is a simple redis server -type Server struct { - l net.Listener - cmds map[string]Cmd - peers map[net.Conn]struct{} - mu sync.Mutex - wg sync.WaitGroup - infoConns int - infoCmds int -} - -// NewServer makes a server listening on addr. Close with .Close(). -func NewServer(addr string) (*Server, error) { - s := Server{ - cmds: map[string]Cmd{}, - peers: map[net.Conn]struct{}{}, - } - - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - s.l = l - - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.serve(l) - }() - return &s, nil -} - -func (s *Server) serve(l net.Listener) { - for { - conn, err := l.Accept() - if err != nil { - return - } - s.ServeConn(conn) - } -} - -// ServeConn handles a net.Conn. Nice with net.Pipe() -func (s *Server) ServeConn(conn net.Conn) { - s.wg.Add(1) - go func() { - defer s.wg.Done() - defer conn.Close() - s.mu.Lock() - s.peers[conn] = struct{}{} - s.infoConns++ - s.mu.Unlock() - - s.servePeer(conn) - - s.mu.Lock() - delete(s.peers, conn) - s.mu.Unlock() - }() -} - -// Addr has the net.Addr struct -func (s *Server) Addr() *net.TCPAddr { - s.mu.Lock() - defer s.mu.Unlock() - if s.l == nil { - return nil - } - return s.l.Addr().(*net.TCPAddr) -} - -// Close a server started with NewServer. It will wait until all clients are -// closed. -func (s *Server) Close() { - s.mu.Lock() - if s.l != nil { - s.l.Close() - } - s.l = nil - for c := range s.peers { - c.Close() - } - s.mu.Unlock() - s.wg.Wait() -} - -// Register a command. It can't have been registered before. Safe to call on a -// running server. 
-func (s *Server) Register(cmd string, f Cmd) error { - s.mu.Lock() - defer s.mu.Unlock() - cmd = strings.ToUpper(cmd) - if _, ok := s.cmds[cmd]; ok { - return fmt.Errorf("command already registered: %s", cmd) - } - s.cmds[cmd] = f - return nil -} - -func (s *Server) servePeer(c net.Conn) { - r := bufio.NewReader(c) - cl := &Peer{ - w: bufio.NewWriter(c), - } - for { - args, err := readArray(r) - if err != nil { - return - } - s.dispatch(cl, args) - cl.w.Flush() - if cl.closed { - c.Close() - return - } - } -} - -func (s *Server) dispatch(c *Peer, args []string) { - cmd, args := args[0], args[1:] - cmdUp := strings.ToUpper(cmd) - s.mu.Lock() - cb, ok := s.cmds[cmdUp] - s.mu.Unlock() - if !ok { - c.WriteError(errUnknownCommand(cmd, args)) - return - } - - s.mu.Lock() - s.infoCmds++ - s.mu.Unlock() - cb(c, cmdUp, args) -} - -// TotalCommands is total (known) commands since this the server started -func (s *Server) TotalCommands() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.infoCmds -} - -// ClientsLen gives the number of connected clients right now -func (s *Server) ClientsLen() int { - s.mu.Lock() - defer s.mu.Unlock() - return len(s.peers) -} - -// TotalConnections give the number of clients connected since the server -// started, including the currently connected ones -func (s *Server) TotalConnections() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.infoConns -} - -// Peer is a client connected to the server -type Peer struct { - w *bufio.Writer - closed bool - Ctx interface{} // anything goes, server won't touch this -} - -// Flush the write buffer. Called automatically after every redis command -func (c *Peer) Flush() { - c.w.Flush() -} - -// Close the client connection after the current command is done. -func (c *Peer) Close() { - c.closed = true -} - -// WriteError writes a redis 'Error' -func (c *Peer) WriteError(e string) { - fmt.Fprintf(c.w, "-%s\r\n", toInline(e)) -} - -// WriteInline writes a redis inline string -func (c *Peer) WriteInline(s string) { - fmt.Fprintf(c.w, "+%s\r\n", toInline(s)) -} - -// WriteOK write the inline string `OK` -func (c *Peer) WriteOK() { - c.WriteInline("OK") -} - -// WriteBulk writes a bulk string -func (c *Peer) WriteBulk(s string) { - fmt.Fprintf(c.w, "$%d\r\n%s\r\n", len(s), s) -} - -// WriteNull writes a redis Null element -func (c *Peer) WriteNull() { - fmt.Fprintf(c.w, "$-1\r\n") -} - -// WriteLen starts an array with the given length -func (c *Peer) WriteLen(n int) { - fmt.Fprintf(c.w, "*%d\r\n", n) -} - -// WriteInt writes an integer -func (c *Peer) WriteInt(i int) { - fmt.Fprintf(c.w, ":%d\r\n", i) -} - -func toInline(s string) string { - return strings.Map(func(r rune) rune { - if unicode.IsSpace(r) { - return ' ' - } - return r - }, s) -} diff --git a/vendor/github.com/alicebob/miniredis/v2/.gitignore b/vendor/github.com/alicebob/miniredis/v2/.gitignore new file mode 100644 index 00000000..7ba06b06 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/.gitignore @@ -0,0 +1,4 @@ +/integration/redis_src/ +/integration/dump.rdb +*.swp +/integration/nodes.conf diff --git a/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md new file mode 100644 index 00000000..3c18e0ef --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md @@ -0,0 +1,181 @@ +## Changelog + + +### v2.17.0 + +- added miniredis.RunT(t) + + +### v2.16.1 + +- fix ZINTERSTORE with wets (thanks @lingjl2010 and @okhowang) +- fix exclusive ranges in XRANGE (thanks @joseotoro) + + +### 
v2.16.0 + +- simplify some code (thanks @zonque) +- support for EXAT/PXAT in SET +- support for XTRIM (thanks @joseotoro) +- support for ZRANDMEMBER +- support for redis.log() in lua (thanks @dirkm) + + +### v2.15.2 + +- Fix race condition in blocking code (thanks @zonque and @robx) +- XREAD accepts '$' as ID (thanks @bradengroom) + + +### v2.15.1 + +- EVAL should cache the script (thanks @guoshimin) + + +### v2.15.0 + +- target redis 6.2 and added new args to various commands +- support for all hyperlog commands (thanks @ilbaktin) +- support for GETDEL (thanks @wszaranski) + + +### v2.14.5 + +- added XPENDING +- support for BLOCK option in XREAD and XREADGROUP + + +### v2.14.4 + +- fix BITPOS error (thanks @xiaoyuzdy) +- small fixes for XREAD, XACK, and XDEL. Mostly error cases. +- fix empty EXEC return type (thanks @ashanbrown) +- fix XDEL (thanks @svakili and @yvesf) +- fix FLUSHALL for streams (thanks @svakili) + + +### v2.14.3 + +- fix problem where Lua code didn't set the selected DB +- update to redis 6.0.10 (thanks @lazappa) + + +### v2.14.2 + +- update LUA dependency +- deal with (p)unsubscribe when there are no channels + + +### v2.14.1 + +- mod tidy + + +### v2.14.0 + +- support for HELLO and the RESP3 protocol +- KEEPTTL in SET (thanks @johnpena) + + +### v2.13.3 + +- support Go 1.14 and 1.15 +- update the `Check...()` methods +- support for XREAD (thanks @pieterlexis) + + +### v2.13.2 + +- Use SAN instead of CN in self signed cert for testing (thanks @johejo) +- Travis CI now tests against the most recent two versions of Go (thanks @johejo) +- changed unit and integration tests to compare raw payloads, not parsed payloads +- remove "redigo" dependency + + +### v2.13.1 + +- added HSTRLEN +- minimal support for ACL users in AUTH + + +### v2.13.0 + +- added RunTLS(...) +- added SetError(...) + + +### v2.12.0 + +- redis 6 +- Lua json update (thanks @gsmith85) +- CLUSTER commands (thanks @kratisto) +- fix TOUCH +- fix a shutdown race condition + + +### v2.11.4 + +- ZUNIONSTORE now supports standard set types (thanks @wshirey) + + +### v2.11.3 + +- support for TOUCH (thanks @cleroux) +- support for cluster and stream commands (thanks @kak-tus) + + +### v2.11.2 + +- make sure Lua code is executed concurrently +- add command GEORADIUSBYMEMBER (thanks @kyeett) + + +### v2.11.1 + +- globals protection for Lua code (thanks @vk-outreach) +- HSET update (thanks @carlgreen) +- fix BLPOP block on shutdown (thanks @Asalle) + + +### v2.11.0 + +- added XRANGE/XREVRANGE, XADD, and XLEN (thanks @skateinmars) +- added GEODIST +- improved precision for geohashes, closer to what real redis does +- use 128bit floats internally for INCRBYFLOAT and related (thanks @timnd) + + +### v2.10.1 + +- added m.Server() + + +### v2.10.0 + +- added UNLINK +- fix DEL zero-argument case +- cleanup some direct access commands +- added GEOADD, GEOPOS, GEORADIUS, and GEORADIUS_RO + + +### v2.9.1 + +- fix issue with ZRANGEBYLEX +- fix issue with BRPOPLPUSH and direct access + + +### v2.9.0 + +- proper versioned import of github.com/gomodule/redigo (thanks @yfei1) +- fix messages generated by PSUBSCRIBE +- optional internal seed (thanks @zikaeroh) + + +### v2.8.0 + +Proper `v2` in go.mod. 
+ + +### older + +See https://github.com/alicebob/miniredis/releases for the full changelog diff --git a/vendor/github.com/alicebob/miniredis/LICENSE b/vendor/github.com/alicebob/miniredis/v2/LICENSE similarity index 100% rename from vendor/github.com/alicebob/miniredis/LICENSE rename to vendor/github.com/alicebob/miniredis/v2/LICENSE diff --git a/vendor/github.com/alicebob/miniredis/Makefile b/vendor/github.com/alicebob/miniredis/v2/Makefile similarity index 100% rename from vendor/github.com/alicebob/miniredis/Makefile rename to vendor/github.com/alicebob/miniredis/v2/Makefile diff --git a/vendor/github.com/alicebob/miniredis/README.md b/vendor/github.com/alicebob/miniredis/v2/README.md similarity index 67% rename from vendor/github.com/alicebob/miniredis/README.md rename to vendor/github.com/alicebob/miniredis/v2/README.md index bfeed831..a6db0804 100644 --- a/vendor/github.com/alicebob/miniredis/README.md +++ b/vendor/github.com/alicebob/miniredis/v2/README.md @@ -16,67 +16,10 @@ stack. There are no dependencies on external binaries, so you can easily integrate it in automated build processes. -## Changelog - -### 2.5.0 - -Added ZPopMin and ZPopMax - -### v2.4.6 - -support for TIME (thanks @leon-barrett and @lirao) -support for ZREVRANGEBYLEX -fix for SINTER (thanks @robstein) -updates for latest redis - -### 2.4.4 - -Fixed nil Lua return value (#43) - -### 2.4.3 - -Fixed using Lua with authenticated redis. - -### 2.4.2 - -Changed redigo import path. - -### 2.4 - -Minor cleanups. Miniredis now requires Go >= 1.9 (only for the tests. If you don't run the tests you can use an older Go version). - -### 2.3.1 - -Lua changes: added `cjson` library, and `redis.sha1hex()`. - -### 2.3 - -Added the `EVAL`, `EVALSHA`, and `SCRIPT` commands. Uses a pure Go Lua interpreter. Please open an issue if there are problems with any Lua code. - -### 2.2 - -Introduced `StartAddr()`. - -### 2.1 - -Internal cleanups. No changes in functionality. - -### 2.0 - -2.0.0 improves TTLs to be `time.Duration` values. `.Expire()` is removed and -replaced by `.TTL()`, which returns the TTL as a `time.Duration`. -This should be the change needed to upgrade: - -1.0: - - m.Expire() == 4 - -2.0: - - m.TTL() == 4 * time.Second - -Furthermore, `.SetTime()` is added to help with `EXPIREAT` commands, and `.FastForward()` is introduced to test keys expiration. - +Be sure to import v2: +``` +import "github.com/alicebob/miniredis/v2" +``` ## Commands @@ -85,10 +28,12 @@ Implemented commands: - Connection (complete) - AUTH -- see RequireAuth() - ECHO + - HELLO -- see RequireUserAuth() - PING - SELECT + - SWAPDB - QUIT - - Key + - Key - DEL - EXISTS - EXPIRE @@ -101,10 +46,12 @@ Implemented commands: - PTTL - RENAME - RENAMENX - - RANDOMKEY -- call math.rand.Seed(...) once before using. + - RANDOMKEY -- see m.Seed(...) + - SCAN + - TOUCH - TTL - TYPE - - SCAN + - UNLINK - Transactions (complete) - DISCARD - EXEC @@ -127,6 +74,7 @@ Implemented commands: - GETBIT - GETRANGE - GETSET + - GETDEL - INCR - INCRBY - INCRBYFLOAT @@ -153,6 +101,7 @@ Implemented commands: - HMSET - HSET - HSETNX + - HSTRLEN - HVALS - HSCAN - List keys (complete) @@ -173,6 +122,13 @@ Implemented commands: - RPOPLPUSH - RPUSH - RPUSHX + - Pub/Sub (complete) + - PSUBSCRIBE + - PUBLISH + - PUBSUB + - PUNSUBSCRIBE + - SUBSCRIBE + - UNSUBSCRIBE - Set keys (complete) - SADD - SCARD @@ -183,8 +139,8 @@ Implemented commands: - SISMEMBER - SMEMBERS - SMOVE - - SPOP -- call math.rand.Seed(...) once before using. - - SRANDMEMBER -- call math.rand.Seed(...) 
once before using. + - SPOP -- see m.Seed(...) + - SRANDMEMBER -- see m.Seed(...) - SREM - SUNION - SUNIONSTORE @@ -198,6 +154,7 @@ Implemented commands: - ZLEXCOUNT - ZPOPMIN - ZPOPMAX + - ZRANDMEMBER - ZRANGE - ZRANGEBYLEX - ZRANGEBYSCORE @@ -213,12 +170,45 @@ Implemented commands: - ZSCORE - ZUNIONSTORE - ZSCAN + - Stream keys + - XACK + - XADD + - XDEL + - XGROUP CREATE + - XINFO STREAM -- partly + - XLEN + - XRANGE + - XREAD + - XREADGROUP + - XREVRANGE + - XPENDING + - XTRIM - Scripting - EVAL - EVALSHA - SCRIPT LOAD - SCRIPT EXISTS - SCRIPT FLUSH + - GEO + - GEOADD + - GEODIST + - ~~GEOHASH~~ + - GEOPOS + - GEORADIUS + - GEORADIUS_RO + - GEORADIUSBYMEMBER + - GEORADIUSBYMEMBER_RO + - Server + - COMMAND -- partly + - Cluster + - CLUSTER SLOTS + - CLUSTER KEYSLOT + - CLUSTER NODES + - HyperLogLog (complete) + - PFADD + - PFCOUNT + - PFMERGE + ## TTLs, key expiration, and time @@ -237,15 +227,26 @@ which case time.Now() will be used. SetTime() also sets the value returned by TIME, which defaults to time.Now(). It is not updated by FastForward, only by SetTime. +## Randomness and Seed() + +Miniredis will use `math/rand`'s global RNG for randomness unless a seed is +provided by calling `m.Seed(...)`. If a seed is provided, then miniredis will +use its own RNG based on that seed. + +Commands which use randomness are: RANDOMKEY, SPOP, and SRANDMEMBER. + ## Example ``` Go + +import ( + ... + "github.com/alicebob/miniredis/v2" + ... +) + func TestSomething(t *testing.T) { - s, err := miniredis.Run() - if err != nil { - panic(err) - } - defer s.Close() + s := miniredis.RunT(t) // Optionally set some keys your code expects: s.Set("foo", "bar") @@ -281,30 +282,12 @@ Commands which will probably not be implemented: - ~~CLUSTER *~~ - ~~READONLY~~ - ~~READWRITE~~ - - GEO (all) -- unless someone needs these - - ~~GEOADD~~ - - ~~GEODIST~~ - - ~~GEOHASH~~ - - ~~GEOPOS~~ - - ~~GEORADIUS~~ - - ~~GEORADIUSBYMEMBER~~ - - HyperLogLog (all) -- unless someone needs these - - ~~PFADD~~ - - ~~PFCOUNT~~ - - ~~PFMERGE~~ - Key - ~~DUMP~~ - ~~MIGRATE~~ - ~~OBJECT~~ - ~~RESTORE~~ - ~~WAIT~~ - - Pub/Sub (all) - - ~~PSUBSCRIBE~~ - - ~~PUBLISH~~ - - ~~PUBSUB~~ - - ~~PUNSUBSCRIBE~~ - - ~~SUBSCRIBE~~ - - ~~UNSUBSCRIBE~~ - Scripting - ~~SCRIPT DEBUG~~ - ~~SCRIPT KILL~~ @@ -312,7 +295,6 @@ Commands which will probably not be implemented: - ~~BGSAVE~~ - ~~BGWRITEAOF~~ - ~~CLIENT *~~ - - ~~COMMAND *~~ - ~~CONFIG *~~ - ~~DEBUG *~~ - ~~INFO~~ @@ -324,13 +306,19 @@ Commands which will probably not be implemented: - ~~SLAVEOF~~ - ~~SLOWLOG~~ - ~~SYNC~~ - + ## &c. -Tests are run against Redis 5.0.3. The [./integration](./integration/) subdir +Integration tests are run against Redis 6.2.4. The [./integration](./integration/) subdir compares miniredis against a real redis instance. +The Redis 6 RESP3 protocol is supported. If there are problems, please open +an issue. + +If you want to test Redis Sentinel have a look at [minisentinel](https://github.com/Bose/minisentinel). + +A changelog is kept at [CHANGELOG.md](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md). 
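As a quick illustration of the TTL helpers the README above describes (SetTTL, FastForward, and the new RunT constructor), here is a minimal test sketch; it sticks to miniredis' direct-access API so no Redis client is required, and the key names are purely illustrative:

```go
package example_test

import (
	"testing"
	"time"

	"github.com/alicebob/miniredis/v2"
)

func TestSessionExpiry(t *testing.T) {
	// RunT starts a server and registers cleanup with the test framework.
	s := miniredis.RunT(t)

	// Seed a key directly and give it a TTL.
	s.Set("session:42", "alice")
	s.SetTTL("session:42", 10*time.Second)
	s.CheckGet(t, "session:42", "alice")

	// FastForward decreases all TTLs; keys at or below zero expire.
	s.FastForward(11 * time.Second)
	if s.Exists("session:42") {
		t.Fatal("expected session:42 to be expired")
	}
}
```

RunT was introduced in v2.17.0 (see the changelog above); on older releases the equivalent pattern is `s, err := miniredis.Run()` with a deferred `s.Close()`.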
-[![Build Status](https://travis-ci.org/alicebob/miniredis.svg?branch=master)](https://travis-ci.org/alicebob/miniredis) -[![GoDoc](https://godoc.org/github.com/alicebob/miniredis?status.svg)](https://godoc.org/github.com/alicebob/miniredis) +[![Build Status](https://travis-ci.com/alicebob/miniredis.svg?branch=master)](https://travis-ci.com/alicebob/miniredis) +[![Go Reference](https://pkg.go.dev/badge/github.com/alicebob/miniredis/v2.svg)](https://pkg.go.dev/github.com/alicebob/miniredis/v2) diff --git a/vendor/github.com/alicebob/miniredis/check.go b/vendor/github.com/alicebob/miniredis/v2/check.go similarity index 64% rename from vendor/github.com/alicebob/miniredis/check.go rename to vendor/github.com/alicebob/miniredis/v2/check.go index 8b42b2e0..acd0d553 100644 --- a/vendor/github.com/alicebob/miniredis/check.go +++ b/vendor/github.com/alicebob/miniredis/v2/check.go @@ -1,30 +1,28 @@ package miniredis -// 'Fail' methods. - import ( - "fmt" - "path/filepath" "reflect" - "runtime" "sort" ) // T is implemented by Testing.T type T interface { - Fail() + Helper() + Errorf(string, ...interface{}) } // CheckGet does not call Errorf() iff there is a string key with the // expected value. Normal use case is `m.CheckGet(t, "username", "theking")`. func (m *Miniredis) CheckGet(t T, key, expected string) { + t.Helper() + found, err := m.Get(key) if err != nil { - lError(t, "GET error, key %#v: %v", key, err) + t.Errorf("GET error, key %#v: %v", key, err) return } if found != expected { - lError(t, "GET error, key %#v: Expected %#v, got %#v", key, expected, found) + t.Errorf("GET error, key %#v: Expected %#v, got %#v", key, expected, found) return } } @@ -33,13 +31,15 @@ func (m *Miniredis) CheckGet(t T, key, expected string) { // expected values. // Normal use case is `m.CheckGet(t, "favorite_colors", "red", "green", "infrared")`. func (m *Miniredis) CheckList(t T, key string, expected ...string) { + t.Helper() + found, err := m.List(key) if err != nil { - lError(t, "List error, key %#v: %v", key, err) + t.Errorf("List error, key %#v: %v", key, err) return } if !reflect.DeepEqual(expected, found) { - lError(t, "List error, key %#v: Expected %#v, got %#v", key, expected, found) + t.Errorf("List error, key %#v: Expected %#v, got %#v", key, expected, found) return } } @@ -48,21 +48,16 @@ func (m *Miniredis) CheckList(t T, key string, expected ...string) { // expected values. // Normal use case is `m.CheckSet(t, "visited", "Rome", "Stockholm", "Dublin")`. func (m *Miniredis) CheckSet(t T, key string, expected ...string) { + t.Helper() + found, err := m.Members(key) if err != nil { - lError(t, "Set error, key %#v: %v", key, err) + t.Errorf("Set error, key %#v: %v", key, err) return } sort.Strings(expected) if !reflect.DeepEqual(expected, found) { - lError(t, "Set error, key %#v: Expected %#v, got %#v", key, expected, found) + t.Errorf("Set error, key %#v: Expected %#v, got %#v", key, expected, found) return } } - -func lError(t T, format string, args ...interface{}) { - _, file, line, _ := runtime.Caller(2) - prefix := fmt.Sprintf("%s:%d: ", filepath.Base(file), line) - fmt.Printf(prefix+format+"\n", args...) 
- t.Fail() -} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go b/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go new file mode 100644 index 00000000..083c4ecf --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go @@ -0,0 +1,66 @@ +// Commands from https://redis.io/commands#cluster + +package miniredis + +import ( + "fmt" + "github.com/alicebob/miniredis/v2/server" + "strings" +) + +// commandsCluster handles some cluster operations. +func commandsCluster(m *Miniredis) { + _ = m.srv.Register("CLUSTER", m.cmdCluster) +} + +func (m *Miniredis) cmdCluster(c *server.Peer, cmd string, args []string) { + if !m.handleAuth(c) { + return + } + + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + switch strings.ToUpper(args[0]) { + case "SLOTS": + m.cmdClusterSlots(c, cmd, args) + case "KEYSLOT": + m.cmdClusterKeySlot(c, cmd, args) + case "NODES": + m.cmdClusterNodes(c, cmd, args) + default: + setDirty(c) + c.WriteError(fmt.Sprintf("ERR 'CLUSTER %s' not supported", strings.Join(args, " "))) + return + } +} + +// CLUSTER SLOTS +func (m *Miniredis) cmdClusterSlots(c *server.Peer, cmd string, args []string) { + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + c.WriteLen(1) + c.WriteLen(3) + c.WriteInt(0) + c.WriteInt(16383) + c.WriteLen(3) + c.WriteBulk(m.srv.Addr().IP.String()) + c.WriteInt(m.srv.Addr().Port) + c.WriteBulk("09dbe9720cda62f7865eabc5fd8857c5d2678366") + }) +} + +//CLUSTER KEYSLOT +func (m *Miniredis) cmdClusterKeySlot(c *server.Peer, cmd string, args []string) { + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + c.WriteInt(163) + }) +} + +//CLUSTER NODES +func (m *Miniredis) cmdClusterNodes(c *server.Peer, cmd string, args []string) { + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + c.WriteBulk("e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:7000@7000 myself,master - 0 0 1 connected 0-16383") + }) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_command.go b/vendor/github.com/alicebob/miniredis/v2/cmd_command.go new file mode 100644 index 00000000..33c691c1 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_command.go @@ -0,0 +1,2049 @@ +// Command 'COMMAND' from https://redis.io/commands#server + +package miniredis + +import "github.com/alicebob/miniredis/v2/server" + +func commandsCommand(m *Miniredis) { + _ = m.srv.Register("COMMAND", m.cmdCommand) +} + +func (m *Miniredis) cmdCommand(c *server.Peer, cmd string, args []string) { + // Got from redis 5.0.7 with + // echo 'COMMAND' | nc redis_addr redis_port + // + res := ` +*200 +*6 +$12 +hincrbyfloat +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$10 +xreadgroup +:-7 +*3 ++write ++noscript ++movablekeys +:1 +:1 +:1 +*6 +$10 +sdiffstore +:-3 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$8 +lastsave +:1 +*2 ++random ++fast +:0 +:0 +:0 +*6 +$5 +setnx +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$8 +bzpopmax +:-3 +*3 ++write ++noscript ++fast +:1 +:-2 +:1 +*6 +$12 +punsubscribe +:-1 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +xack +:-4 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$10 +pfselftest +:1 +*1 ++admin +:0 +:0 +:0 +*6 +$6 +substr +:4 +*1 ++readonly +:1 +:1 +:1 +*6 +$8 +smembers +:2 +*2 ++readonly ++sort_for_script +:1 +:1 +:1 +*6 +$11 +unsubscribe +:-1 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$11 +zinterstore +:-4 +*3 ++write ++denyoom ++movablekeys +:0 +:0 +:0 +*6 +$6 +strlen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$7 +pfmerge +:-2 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$9 
+randomkey +:1 +*2 ++readonly ++random +:0 +:0 +:0 +*6 +$6 +lolwut +:-1 +*1 ++readonly +:0 +:0 +:0 +*6 +$4 +rpop +:2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +hkeys +:2 +*2 ++readonly ++sort_for_script +:1 +:1 +:1 +*6 +$6 +client +:-2 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$6 +module +:-2 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$7 +slowlog +:-2 +*2 ++admin ++random +:0 +:0 +:0 +*6 +$7 +geohash +:-2 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +lrange +:4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +ping +:-1 +*2 ++stale ++fast +:0 +:0 +:0 +*6 +$8 +bitcount +:-2 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +pubsub +:-2 +*4 ++pubsub ++random ++loading ++stale +:0 +:0 +:0 +*6 +$4 +role +:1 +*3 ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +hget +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +object +:-2 +*2 ++readonly ++random +:2 +:2 +:1 +*6 +$9 +zrevrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$7 +hincrby +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$9 +zlexcount +:4 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$5 +scard +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +append +:3 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$7 +hstrlen +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +config +:-2 +*4 ++admin ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +hset +:-4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$16 +zrevrangebyscore +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +incr +:2 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +setbit +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$9 +rpoplpush +:3 +*2 ++write ++denyoom +:1 +:2 +:1 +*6 +$6 +xclaim +:-6 +*3 ++write ++random ++fast +:1 +:1 +:1 +*6 +$11 +sinterstore +:-3 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$7 +publish +:3 +*4 ++pubsub ++loading ++stale ++fast +:0 +:0 +:0 +*6 +$5 +hscan +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$5 +multi +:1 +*2 ++noscript ++fast +:0 +:0 +:0 +*6 +$3 +set +:-3 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +lpushx +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$16 +zremrangebyscore +:4 +*1 ++write +:1 +:1 +:1 +*6 +$9 +pexpireat +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +hdel +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$12 +bgrewriteaof +:1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$7 +migrate +:-6 +*3 ++write ++random ++movablekeys +:0 +:0 +:0 +*6 +$9 +replicaof +:3 +*3 ++admin ++noscript ++stale +:0 +:0 +:0 +*6 +$5 +touch +:-2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +xsetid +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +bitop +:-4 +*2 ++write ++denyoom +:2 +:-1 +:1 +*6 +$6 +swapdb +:3 +*2 ++write ++fast +:0 +:0 +:0 +*6 +$5 +sdiff +:-2 +*2 ++readonly ++sort_for_script +:1 +:-1 +:1 +*6 +$6 +lindex +:3 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +wait +:3 +*1 ++noscript +:0 +:0 +:0 +*6 +$4 +lrem +:4 +*1 ++write +:1 +:1 +:1 +*6 +$6 +hsetnx +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$8 +getrange +:4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +hlen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +post +:-1 +*2 ++loading ++stale +:0 +:0 +:0 +*6 +$9 +sismember +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$7 +unwatch +:1 +*2 ++noscript ++fast +:0 +:0 +:0 +*6 +$5 +lpush +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$4 +scan +:-2 +*2 ++readonly ++random +:0 +:0 +:0 +*6 +$5 +smove +:4 +*2 ++write ++fast +:1 +:2 +:1 +*6 +$7 +cluster +:-2 +*1 ++admin +:0 +:0 +:0 +*6 +$6 +bgsave +:-1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$4 +dump +:2 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$7 +latency +:-2 +*4 ++admin ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$8 +bzpopmin +:-3 +*3 ++write ++noscript ++fast +:1 +:-2 +:1 +*6 +$6 +getbit +:3 +*2 ++readonly ++fast +:1 +:1 +:1 
+*6 +$7 +hgetall +:2 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$6 +rename +:3 +*1 ++write +:1 +:2 +:1 +*6 +$9 +subscribe +:-2 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +xdel +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$15 +zremrangebyrank +:4 +*1 ++write +:1 +:1 +:1 +*6 +$4 +type +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +script +:-2 +*1 ++noscript +:0 +:0 +:0 +*6 +$5 +hmset +:-4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +sunion +:-2 +*2 ++readonly ++sort_for_script +:1 +:-1 +:1 +*6 +$4 +mget +:-2 +*2 ++readonly ++fast +:1 +:-1 +:1 +*6 +$10 +brpoplpush +:4 +*3 ++write ++denyoom ++noscript +:1 +:2 +:1 +*6 +$6 +geoadd +:-5 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +decrby +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$4 +echo +:2 +*1 ++fast +:0 +:0 +:0 +*6 +$6 +dbsize +:1 +*2 ++readonly ++fast +:0 +:0 +:0 +*6 +$5 +zcard +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +select +:2 +*2 ++loading ++fast +:0 +:0 +:0 +*6 +$4 +sadd +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +host: +:-1 +*2 ++loading ++stale +:0 +:0 +:0 +*6 +$5 +sscan +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$12 +georadius_ro +:-6 +*2 ++readonly ++movablekeys +:1 +:1 +:1 +*6 +$7 +monitor +:1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$14 +zremrangebylex +:4 +*1 ++write +:1 +:1 +:1 +*6 +$11 +sunionstore +:-3 +*2 ++write ++denyoom +:1 +:-1 +:1 +*6 +$5 +zscan +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$9 +readwrite +:1 +*1 ++fast +:0 +:0 +:0 +*6 +$6 +xgroup +:-2 +*2 ++write ++denyoom +:2 +:2 +:1 +*6 +$5 +setex +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$4 +save +:1 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$5 +hvals +:2 +*2 ++readonly ++sort_for_script +:1 +:1 +:1 +*6 +$5 +watch +:-2 +*2 ++noscript ++fast +:1 +:-1 +:1 +*6 +$7 +hexists +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +info +:-1 +*3 ++random ++loading ++stale +:0 +:0 +:0 +*6 +$5 +psync +:3 +*3 ++readonly ++admin ++noscript +:0 +:0 +:0 +*6 +$11 +zrangebylex +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +zadd +:-4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$4 +xlen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +auth +:2 +*4 ++noscript ++loading ++stale ++fast +:0 +:0 +:0 +*6 +$4 +srem +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$9 +georadius +:-6 +*2 ++write ++movablekeys +:1 +:1 +:1 +*6 +$4 +exec +:1 +*2 ++noscript ++skip_monitor +:0 +:0 +:0 +*6 +$7 +pfcount +:-2 +*1 ++readonly +:1 +:-1 +:1 +*6 +$7 +zpopmin +:-2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +move +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +xtrim +:-2 +*3 ++write ++random ++fast +:1 +:1 +:1 +*6 +$6 +asking +:1 +*1 ++fast +:0 +:0 +:0 +*6 +$4 +pttl +:2 +*3 ++readonly ++random ++fast +:1 +:1 +:1 +*6 +$11 +srandmember +:-2 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$8 +flushall +:-1 +*1 ++write +:0 +:0 +:0 +*6 +$4 +sort +:-2 +*3 ++write ++denyoom ++movablekeys +:1 +:1 +:1 +*6 +$3 +del +:-2 +*1 ++write +:1 +:-1 +:1 +*6 +$14 +restore-asking +:-4 +*3 ++write ++denyoom ++asking +:1 +:1 +:1 +*6 +$10 +psubscribe +:-2 +*4 ++pubsub ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$4 +decr +:2 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +incrby +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$14 +zrevrangebylex +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$8 +bitfield +:-2 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +exists +:-2 +*2 ++readonly ++fast +:1 +:-1 +:1 +*6 +$8 +replconf +:-1 +*4 ++admin ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$7 +zincrby +:4 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +blpop +:-3 +*2 ++write ++noscript +:1 +:-2 +:1 +*6 +$4 +lpop +:2 +*2 ++write ++fast +:1 +:1 
+:1 +*6 +$3 +ttl +:2 +*3 ++readonly ++random ++fast +:1 +:1 +:1 +*6 +$5 +xread +:-4 +*3 ++readonly ++noscript ++movablekeys +:1 +:1 +:1 +*6 +$5 +rpush +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$8 +zrevrank +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$11 +incrbyfloat +:3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$5 +brpop +:-3 +*2 ++write ++noscript +:1 +:-2 +:1 +*6 +$4 +xadd +:-5 +*4 ++write ++denyoom ++random ++fast +:1 +:1 +:1 +*6 +$8 +setrange +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$17 +georadiusbymember +:-5 +*2 ++write ++movablekeys +:1 +:1 +:1 +*6 +$6 +unlink +:-2 +*2 ++write ++fast +:1 +:-1 +:1 +*6 +$8 +expireat +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +debug +:-2 +*2 ++admin ++noscript +:0 +:0 +:0 +*6 +$20 +georadiusbymember_ro +:-5 +*2 ++readonly ++movablekeys +:1 +:1 +:1 +*6 +$4 +lset +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +zscore +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +llen +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$4 +time +:1 +*2 ++random ++fast +:0 +:0 +:0 +*6 +$8 +shutdown +:-1 +*4 ++admin ++noscript ++loading ++stale +:0 +:0 +:0 +*6 +$7 +evalsha +:-3 +*2 ++noscript ++movablekeys +:0 +:0 +:0 +*6 +$6 +zcount +:4 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +memory +:-2 +*2 ++readonly ++random +:0 +:0 +:0 +*6 +$5 +xinfo +:-2 +*2 ++readonly ++random +:2 +:2 +:1 +*6 +$8 +xpending +:-3 +*2 ++readonly ++random +:1 +:1 +:1 +*6 +$4 +eval +:-3 +*2 ++noscript ++movablekeys +:0 +:0 +:0 +*6 +$6 +xrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$7 +restore +:-4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$7 +zpopmax +:-2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +mset +:-3 +*2 ++write ++denyoom +:1 +:-1 +:2 +*6 +$4 +spop +:-2 +*3 ++write ++random ++fast +:1 +:1 +:1 +*6 +$5 +ltrim +:4 +*1 ++write +:1 +:1 +:1 +*6 +$5 +zrank +:3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$9 +xrevrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$3 +get +:2 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$7 +flushdb +:-1 +*1 ++write +:0 +:0 +:0 +*6 +$5 +hmget +:-3 +*2 ++readonly ++fast +:1 +:1 +:1 +*6 +$6 +msetnx +:-3 +*2 ++write ++denyoom +:1 +:-1 +:2 +*6 +$7 +persist +:2 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$11 +zunionstore +:-4 +*3 ++write ++denyoom ++movablekeys +:0 +:0 +:0 +*6 +$7 +command +:0 +*3 ++random ++loading ++stale +:0 +:0 +:0 +*6 +$8 +renamenx +:3 +*2 ++write ++fast +:1 +:2 +:1 +*6 +$6 +zrange +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$7 +pexpire +:3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$4 +keys +:2 +*2 ++readonly ++sort_for_script +:0 +:0 +:0 +*6 +$4 +zrem +:-3 +*2 ++write ++fast +:1 +:1 +:1 +*6 +$5 +pfadd +:-2 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$6 +psetex +:4 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$13 +zrangebyscore +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$4 +sync +:1 +*3 ++readonly ++admin ++noscript +:0 +:0 +:0 +*6 +$7 +pfdebug +:-3 +*1 ++write +:0 +:0 +:0 +*6 +$7 +discard +:1 +*2 ++noscript ++fast +:0 +:0 +:0 +*6 +$8 +readonly +:1 +*1 ++fast +:0 +:0 +:0 +*6 +$7 +geodist +:-4 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +geopos +:-2 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +bitpos +:-3 +*1 ++readonly +:1 +:1 +:1 +*6 +$6 +sinter +:-2 +*2 ++readonly ++sort_for_script +:1 +:-1 +:1 +*6 +$6 +getset +:3 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$7 +slaveof +:3 +*3 ++admin ++noscript ++stale +:0 +:0 +:0 +*6 +$6 +rpushx +:-3 +*3 ++write ++denyoom ++fast +:1 +:1 +:1 +*6 +$7 +linsert +:5 +*2 ++write ++denyoom +:1 +:1 +:1 +*6 +$6 +expire +:3 +*2 ++write ++fast +:1 +:1 +:1 + ` + + c.WriteBulk(res) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go 
b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go new file mode 100644 index 00000000..1bf98012 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go @@ -0,0 +1,281 @@ +// Commands from https://redis.io/commands#connection + +package miniredis + +import ( + "fmt" + "strconv" + "strings" + + "github.com/alicebob/miniredis/v2/server" +) + +func commandsConnection(m *Miniredis) { + m.srv.Register("AUTH", m.cmdAuth) + m.srv.Register("ECHO", m.cmdEcho) + m.srv.Register("HELLO", m.cmdHello) + m.srv.Register("PING", m.cmdPing) + m.srv.Register("QUIT", m.cmdQuit) + m.srv.Register("SELECT", m.cmdSelect) + m.srv.Register("SWAPDB", m.cmdSwapdb) +} + +// PING +func (m *Miniredis) cmdPing(c *server.Peer, cmd string, args []string) { + if !m.handleAuth(c) { + return + } + + if len(args) > 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + payload := "" + if len(args) > 0 { + payload = args[0] + } + + // PING is allowed in subscribed state + if sub := getCtx(c).subscriber; sub != nil { + c.Block(func(c *server.Writer) { + c.WriteLen(2) + c.WriteBulk("pong") + c.WriteBulk(payload) + }) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + if payload == "" { + c.WriteInline("PONG") + return + } + c.WriteBulk(payload) + }) +} + +// AUTH +func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + if len(args) > 2 { + c.WriteError(msgSyntaxError) + return + } + if m.checkPubsub(c, cmd) { + return + } + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } + username := "default" + pw := args[0] + if len(args) == 2 { + username, pw = args[0], args[1] + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + if len(m.passwords) == 0 && username == "default" { + c.WriteError("ERR AUTH called without any password configured for the default user. Are you sure your configuration is correct?") + return + } + setPW, ok := m.passwords[username] + if !ok { + c.WriteError("WRONGPASS invalid username-password pair") + return + } + if setPW != pw { + c.WriteError("WRONGPASS invalid username-password pair") + return + } + + ctx.authenticated = true + c.WriteOK() + }) +} + +// HELLO +func (m *Miniredis) cmdHello(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + c.WriteError(errWrongNumber(cmd)) + return + } + + var opts struct { + version int + username, password string + } + + versionArg, args := args[0], args[1:] + var err error + opts.version, err = strconv.Atoi(versionArg) + if err != nil { + c.WriteError("ERR Protocol version is not an integer or out of range") + return + } + switch opts.version { + case 2, 3: + default: + c.WriteError("NOPROTO unsupported protocol version") + return + } + + var checkAuth bool + for len(args) > 0 { + switch strings.ToUpper(args[0]) { + case "AUTH": + if len(args) < 3 { + c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0])) + return + } + opts.username, opts.password, args = args[1], args[2], args[3:] + checkAuth = true + case "SETNAME": + if len(args) < 2 { + c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0])) + return + } + _, args = args[1], args[2:] + default: + c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0])) + return + } + } + + if len(m.passwords) == 0 && opts.username == "default" { + // redis ignores legacy "AUTH" if it's not enabled. 
+ checkAuth = false + } + if checkAuth { + setPW, ok := m.passwords[opts.username] + if !ok { + c.WriteError("WRONGPASS invalid username-password pair") + return + } + if setPW != opts.password { + c.WriteError("WRONGPASS invalid username-password pair") + return + } + getCtx(c).authenticated = true + } + + c.Resp3 = opts.version == 3 + + c.WriteMapLen(7) + c.WriteBulk("server") + c.WriteBulk("miniredis") + c.WriteBulk("version") + c.WriteBulk("6.0.5") + c.WriteBulk("proto") + c.WriteInt(opts.version) + c.WriteBulk("id") + c.WriteInt(42) + c.WriteBulk("mode") + c.WriteBulk("standalone") + c.WriteBulk("role") + c.WriteBulk("master") + c.WriteBulk("modules") + c.WriteLen(0) +} + +// ECHO +func (m *Miniredis) cmdEcho(c *server.Peer, cmd string, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + msg := args[0] + c.WriteBulk(msg) + }) +} + +// SELECT +func (m *Miniredis) cmdSelect(c *server.Peer, cmd string, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + id, err := strconv.Atoi(args[0]) + if err != nil { + c.WriteError(msgInvalidInt) + setDirty(c) + return + } + if id < 0 { + c.WriteError("ERR DB index is out of range") + setDirty(c) + return + } + + ctx.selectedDB = id + c.WriteOK() + }) +} + +// SWAPDB +func (m *Miniredis) cmdSwapdb(c *server.Peer, cmd string, args []string) { + if len(args) != 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + id1, err := strconv.Atoi(args[0]) + if err != nil { + c.WriteError("ERR invalid first DB index") + setDirty(c) + return + } + id2, err := strconv.Atoi(args[1]) + if err != nil { + c.WriteError("ERR invalid second DB index") + setDirty(c) + return + } + if id1 < 0 || id2 < 0 { + c.WriteError("ERR DB index is out of range") + setDirty(c) + return + } + + m.swapDB(id1, id2) + + c.WriteOK() + }) +} + +// QUIT +func (m *Miniredis) cmdQuit(c *server.Peer, cmd string, args []string) { + // QUIT isn't transactionfied and accepts any arguments. + c.WriteOK() + c.Close() +} diff --git a/vendor/github.com/alicebob/miniredis/cmd_generic.go b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go similarity index 86% rename from vendor/github.com/alicebob/miniredis/cmd_generic.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_generic.go index fa394790..3859838c 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_generic.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go @@ -3,17 +3,17 @@ package miniredis import ( - "math/rand" "strconv" "strings" "time" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) // commandsGeneric handles EXPIRE, TTL, PERSIST, &c. 
func commandsGeneric(m *Miniredis) { m.srv.Register("DEL", m.cmdDel) + m.srv.Register("UNLINK", m.cmdDel) // DUMP m.srv.Register("EXISTS", m.cmdExists) m.srv.Register("EXPIRE", makeCmdExpire(m, false, time.Second)) @@ -31,6 +31,7 @@ func commandsGeneric(m *Miniredis) { m.srv.Register("RENAMENX", m.cmdRenamenx) // RESTORE // SORT + m.srv.Register("TOUCH", m.cmdTouch) m.srv.Register("TTL", m.cmdTTL) m.srv.Register("TYPE", m.cmdType) m.srv.Register("SCAN", m.cmdScan) @@ -49,6 +50,9 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] value := args[1] @@ -68,20 +72,7 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, return } if unix { - var ts time.Time - switch d { - case time.Millisecond: - ts = time.Unix(int64(i/1000), 1000000*int64(i%1000)) - case time.Second: - ts = time.Unix(int64(i), 0) - default: - panic("invalid time unit (d). Fixme!") - } - now := m.now - if now.IsZero() { - now = time.Now().UTC() - } - db.ttl[key] = ts.Sub(now) + db.ttl[key] = m.at(i, d) } else { db.ttl[key] = time.Duration(i) * d } @@ -92,6 +83,34 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, } } +// TOUCH +func (m *Miniredis) cmdTouch(c *server.Peer, cmd string, args []string) { + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + count := 0 + for _, key := range args { + if db.exists(key) { + count++ + } + } + c.WriteInt(count) + }) +} + // TTL func (m *Miniredis) cmdTTL(c *server.Peer, cmd string, args []string) { if len(args) != 1 { @@ -102,6 +121,10 @@ func (m *Miniredis) cmdTTL(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + key := args[0] withTx(m, c, func(c *server.Peer, ctx *connCtx) { @@ -133,6 +156,10 @@ func (m *Miniredis) cmdPTTL(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + key := args[0] withTx(m, c, func(c *server.Peer, ctx *connCtx) { @@ -164,6 +191,10 @@ func (m *Miniredis) cmdPersist(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + key := args[0] withTx(m, c, func(c *server.Peer, ctx *connCtx) { @@ -186,11 +217,20 @@ func (m *Miniredis) cmdPersist(c *server.Peer, cmd string, args []string) { }) } -// DEL +// DEL and UNLINK func (m *Miniredis) cmdDel(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -216,6 +256,9 @@ func (m *Miniredis) cmdType(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -242,6 +285,9 @@ func (m *Miniredis) cmdExists(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -266,6 +312,9 @@ func (m *Miniredis) cmdMove(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return 
+ } key := args[0] targetDB, err := strconv.Atoi(args[1]) @@ -299,13 +348,16 @@ func (m *Miniredis) cmdKeys(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - keys := matchKeys(db.allKeys(), key) + keys, _ := matchKeys(db.allKeys(), key) c.WriteLen(len(keys)) for _, s := range keys { c.WriteBulk(s) @@ -323,6 +375,9 @@ func (m *Miniredis) cmdRandomkey(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -331,7 +386,7 @@ func (m *Miniredis) cmdRandomkey(c *server.Peer, cmd string, args []string) { c.WriteNull() return } - nr := rand.Intn(len(db.keys)) + nr := m.randIntn(len(db.keys)) for k := range db.keys { if nr == 0 { c.WriteBulk(k) @@ -352,6 +407,9 @@ func (m *Miniredis) cmdRename(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } from, to := args[0], args[1] @@ -378,6 +436,9 @@ func (m *Miniredis) cmdRenamenx(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } from, to := args[0], args[1] @@ -409,6 +470,9 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } cursor, err := strconv.Atoi(args[0]) if err != nil { @@ -466,7 +530,7 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { keys := db.allKeys() if withMatch { - keys = matchKeys(keys, match) + keys, _ = matchKeys(keys, match) } c.WriteLen(2) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go b/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go new file mode 100644 index 00000000..a6c1901d --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go @@ -0,0 +1,601 @@ +// Commands from https://redis.io/commands#geo + +package miniredis + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/alicebob/miniredis/v2/server" +) + +// commandsGeo handles GEOADD, GEORADIUS etc. 
+func commandsGeo(m *Miniredis) { + m.srv.Register("GEOADD", m.cmdGeoadd) + m.srv.Register("GEODIST", m.cmdGeodist) + m.srv.Register("GEOPOS", m.cmdGeopos) + m.srv.Register("GEORADIUS", m.cmdGeoradius) + m.srv.Register("GEORADIUS_RO", m.cmdGeoradius) + m.srv.Register("GEORADIUSBYMEMBER", m.cmdGeoradiusbymember) + m.srv.Register("GEORADIUSBYMEMBER_RO", m.cmdGeoradiusbymember) +} + +// GEOADD +func (m *Miniredis) cmdGeoadd(c *server.Peer, cmd string, args []string) { + if len(args) < 3 || len(args[1:])%3 != 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + key, args := args[0], args[1:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if db.exists(key) && db.t(key) != "zset" { + c.WriteError(ErrWrongType.Error()) + return + } + + toSet := map[string]float64{} + for len(args) > 2 { + rawLong, rawLat, name := args[0], args[1], args[2] + args = args[3:] + longitude, err := strconv.ParseFloat(rawLong, 64) + if err != nil { + c.WriteError("ERR value is not a valid float") + return + } + latitude, err := strconv.ParseFloat(rawLat, 64) + if err != nil { + c.WriteError("ERR value is not a valid float") + return + } + + if latitude < -85.05112878 || + latitude > 85.05112878 || + longitude < -180 || + longitude > 180 { + c.WriteError(fmt.Sprintf("ERR invalid longitude,latitude pair %.6f,%.6f", longitude, latitude)) + return + } + + toSet[name] = float64(toGeohash(longitude, latitude)) + } + + set := 0 + for name, score := range toSet { + if db.ssetAdd(key, score, name) { + set++ + } + } + c.WriteInt(set) + }) +} + +// GEODIST +func (m *Miniredis) cmdGeodist(c *server.Peer, cmd string, args []string) { + if len(args) < 3 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key, from, to, args := args[0], args[1], args[2], args[3:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + if !db.exists(key) { + c.WriteNull() + return + } + if db.t(key) != "zset" { + c.WriteError(ErrWrongType.Error()) + return + } + + unit := "m" + if len(args) > 0 { + unit, args = args[0], args[1:] + } + if len(args) > 0 { + c.WriteError(msgSyntaxError) + return + } + + toMeter := parseUnit(unit) + if toMeter == 0 { + c.WriteError(msgUnsupportedUnit) + return + } + + members := db.sortedsetKeys[key] + fromD, okFrom := members.get(from) + toD, okTo := members.get(to) + if !okFrom || !okTo { + c.WriteNull() + return + } + + fromLo, fromLat := fromGeohash(uint64(fromD)) + toLo, toLat := fromGeohash(uint64(toD)) + + dist := distance(fromLat, fromLo, toLat, toLo) / toMeter + c.WriteBulk(fmt.Sprintf("%.4f", dist)) + }) +} + +// GEOPOS +func (m *Miniredis) cmdGeopos(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + key, args := args[0], args[1:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if db.exists(key) && db.t(key) != "zset" { + c.WriteError(ErrWrongType.Error()) + return + } + + c.WriteLen(len(args)) + for _, l := range args { + if !db.ssetExists(key, l) { + c.WriteLen(-1) + continue + } + score := db.ssetScore(key, l) + c.WriteLen(2) + long, lat := fromGeohash(uint64(score)) + c.WriteBulk(fmt.Sprintf("%f", long)) + c.WriteBulk(fmt.Sprintf("%f", lat)) + } + }) +} + 
+type geoDistance struct { + Name string + Score float64 + Distance float64 + Longitude float64 + Latitude float64 +} + +// GEORADIUS and GEORADIUS_RO +func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { + if len(args) < 5 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key := args[0] + longitude, err := strconv.ParseFloat(args[1], 64) + if err != nil { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + latitude, err := strconv.ParseFloat(args[2], 64) + if err != nil { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + radius, err := strconv.ParseFloat(args[3], 64) + if err != nil || radius < 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + toMeter := parseUnit(args[4]) + if toMeter == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + args = args[5:] + + var ( + withDist = false + withCoord = false + direction = unsorted + count = 0 + withStore = false + storeKey = "" + withStoredist = false + storedistKey = "" + ) + for len(args) > 0 { + arg := args[0] + args = args[1:] + switch strings.ToUpper(arg) { + case "WITHCOORD": + withCoord = true + case "WITHDIST": + withDist = true + case "ASC": + direction = asc + case "DESC": + direction = desc + case "COUNT": + if len(args) == 0 { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + n, err := strconv.Atoi(args[0]) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + if n <= 0 { + setDirty(c) + c.WriteError("ERR COUNT must be > 0") + return + } + args = args[1:] + count = n + case "STORE": + if len(args) == 0 { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + withStore = true + storeKey = args[0] + args = args[1:] + case "STOREDIST": + if len(args) == 0 { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + withStoredist = true + storedistKey = args[0] + args = args[1:] + default: + setDirty(c) + c.WriteError("ERR syntax error") + return + } + } + + if strings.ToUpper(cmd) == "GEORADIUS_RO" && (withStore || withStoredist) { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + if (withStore || withStoredist) && (withDist || withCoord) { + c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options") + return + } + + db := m.db(ctx.selectedDB) + members := db.ssetElements(key) + + matches := withinRadius(members, longitude, latitude, radius*toMeter) + + // deal with ASC/DESC + if direction != unsorted { + sort.Slice(matches, func(i, j int) bool { + if direction == desc { + return matches[i].Distance > matches[j].Distance + } + return matches[i].Distance < matches[j].Distance + }) + } + + // deal with COUNT + if count > 0 && len(matches) > count { + matches = matches[:count] + } + + // deal with "STORE x" + if withStore { + db.del(storeKey, true) + for _, member := range matches { + db.ssetAdd(storeKey, member.Score, member.Name) + } + c.WriteInt(len(matches)) + return + } + + // deal with "STOREDIST x" + if withStoredist { + db.del(storedistKey, true) + for _, member := range matches { + db.ssetAdd(storedistKey, member.Distance/toMeter, member.Name) + } + c.WriteInt(len(matches)) + return + } + + c.WriteLen(len(matches)) + for _, member := range matches { + if !withDist && !withCoord { + c.WriteBulk(member.Name) + continue + } + + len := 1 + if withDist { + len++ + } + if withCoord { 
+ len++ + } + c.WriteLen(len) + c.WriteBulk(member.Name) + if withDist { + c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter)) + } + if withCoord { + c.WriteLen(2) + c.WriteBulk(fmt.Sprintf("%f", member.Longitude)) + c.WriteBulk(fmt.Sprintf("%f", member.Latitude)) + } + } + }) +} + +// GEORADIUSBYMEMBER and GEORADIUSBYMEMBER_RO +func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []string) { + if len(args) < 4 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key := args[0] + member := args[1] + + radius, err := strconv.ParseFloat(args[2], 64) + if err != nil || radius < 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + toMeter := parseUnit(args[3]) + if toMeter == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + args = args[4:] + + var ( + withDist = false + withCoord = false + direction = unsorted + count = 0 + withStore = false + storeKey = "" + withStoredist = false + storedistKey = "" + ) + for len(args) > 0 { + arg := args[0] + args = args[1:] + switch strings.ToUpper(arg) { + case "WITHCOORD": + withCoord = true + case "WITHDIST": + withDist = true + case "ASC": + direction = asc + case "DESC": + direction = desc + case "COUNT": + if len(args) == 0 { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + n, err := strconv.Atoi(args[0]) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + if n <= 0 { + setDirty(c) + c.WriteError("ERR COUNT must be > 0") + return + } + args = args[1:] + count = n + case "STORE": + if len(args) == 0 { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + withStore = true + storeKey = args[0] + args = args[1:] + case "STOREDIST": + if len(args) == 0 { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + withStoredist = true + storedistKey = args[0] + args = args[1:] + default: + setDirty(c) + c.WriteError("ERR syntax error") + return + } + } + + if strings.ToUpper(cmd) == "GEORADIUSBYMEMBER_RO" && (withStore || withStoredist) { + setDirty(c) + c.WriteError("ERR syntax error") + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + if (withStore || withStoredist) && (withDist || withCoord) { + c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options") + return + } + + db := m.db(ctx.selectedDB) + if !db.exists(key) { + c.WriteNull() + return + } + + if db.t(key) != "zset" { + c.WriteError(ErrWrongType.Error()) + return + } + + // get position of member + if !db.ssetExists(key, member) { + c.WriteError("ERR could not decode requested zset member") + return + } + score := db.ssetScore(key, member) + longitude, latitude := fromGeohash(uint64(score)) + + members := db.ssetElements(key) + matches := withinRadius(members, longitude, latitude, radius*toMeter) + + // deal with ASC/DESC + if direction != unsorted { + sort.Slice(matches, func(i, j int) bool { + if direction == desc { + return matches[i].Distance > matches[j].Distance + } + return matches[i].Distance < matches[j].Distance + }) + } + + // deal with COUNT + if count > 0 && len(matches) > count { + matches = matches[:count] + } + + // deal with "STORE x" + if withStore { + db.del(storeKey, true) + for _, member := range matches { + db.ssetAdd(storeKey, member.Score, member.Name) + } + c.WriteInt(len(matches)) + return + } + + // deal with "STOREDIST x" + if withStoredist { + db.del(storedistKey, true) + for _, member := 
range matches { + db.ssetAdd(storedistKey, member.Distance/toMeter, member.Name) + } + c.WriteInt(len(matches)) + return + } + + c.WriteLen(len(matches)) + for _, member := range matches { + if !withDist && !withCoord { + c.WriteBulk(member.Name) + continue + } + + len := 1 + if withDist { + len++ + } + if withCoord { + len++ + } + c.WriteLen(len) + c.WriteBulk(member.Name) + if withDist { + c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter)) + } + if withCoord { + c.WriteLen(2) + c.WriteBulk(fmt.Sprintf("%f", member.Longitude)) + c.WriteBulk(fmt.Sprintf("%f", member.Latitude)) + } + } + }) +} + +func withinRadius(members []ssElem, longitude, latitude, radius float64) []geoDistance { + matches := []geoDistance{} + for _, el := range members { + elLo, elLat := fromGeohash(uint64(el.score)) + distanceInMeter := distance(latitude, longitude, elLat, elLo) + + if distanceInMeter <= radius { + matches = append(matches, geoDistance{ + Name: el.member, + Score: el.score, + Distance: distanceInMeter, + Longitude: elLo, + Latitude: elLat, + }) + } + } + return matches +} + +func parseUnit(u string) float64 { + switch u { + case "m": + return 1 + case "km": + return 1000 + case "mi": + return 1609.34 + case "ft": + return 0.3048 + default: + return 0 + } +} diff --git a/vendor/github.com/alicebob/miniredis/cmd_hash.go b/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go similarity index 85% rename from vendor/github.com/alicebob/miniredis/cmd_hash.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_hash.go index 1c65ebec..142ba63e 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_hash.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go @@ -3,10 +3,11 @@ package miniredis import ( + "math/big" "strconv" "strings" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) // commandsHash handles all hash value operations. @@ -23,13 +24,14 @@ func commandsHash(m *Miniredis) { m.srv.Register("HMSET", m.cmdHmset) m.srv.Register("HSET", m.cmdHset) m.srv.Register("HSETNX", m.cmdHsetnx) + m.srv.Register("HSTRLEN", m.cmdHstrlen) m.srv.Register("HVALS", m.cmdHvals) m.srv.Register("HSCAN", m.cmdHscan) } // HSET func (m *Miniredis) cmdHset(c *server.Peer, cmd string, args []string) { - if len(args) != 3 { + if len(args) < 3 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return @@ -37,22 +39,27 @@ func (m *Miniredis) cmdHset(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } - key, field, value := args[0], args[1], args[2] + key, pairs := args[0], args[1:] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) + if len(pairs)%2 == 1 { + c.WriteError(errWrongNumber(cmd)) + return + } + if t, ok := db.keys[key]; ok && t != "hash" { c.WriteError(msgWrongType) return } - if db.hashSet(key, field, value) { - c.WriteInt(0) - } else { - c.WriteInt(1) - } + new := db.hashSet(key, pairs...) 
+ c.WriteInt(new) }) } @@ -66,6 +73,9 @@ func (m *Miniredis) cmdHsetnx(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, field, value := args[0], args[1], args[2] @@ -102,12 +112,14 @@ func (m *Miniredis) cmdHmset(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, args := args[0], args[1:] if len(args)%2 != 0 { setDirty(c) - // non-default error message - c.WriteError("ERR wrong number of arguments for HMSET") + c.WriteError(errWrongNumber(cmd)) return } @@ -138,6 +150,9 @@ func (m *Miniredis) cmdHget(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, field := args[0], args[1] @@ -172,6 +187,9 @@ func (m *Miniredis) cmdHdel(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, fields := args[0], args[1:] @@ -217,6 +235,9 @@ func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, field := args[0], args[1] @@ -251,6 +272,9 @@ func (m *Miniredis) cmdHgetall(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -259,7 +283,7 @@ func (m *Miniredis) cmdHgetall(c *server.Peer, cmd string, args []string) { t, ok := db.keys[key] if !ok { - c.WriteLen(0) + c.WriteMapLen(0) return } if t != "hash" { @@ -267,7 +291,7 @@ func (m *Miniredis) cmdHgetall(c *server.Peer, cmd string, args []string) { return } - c.WriteLen(len(db.hashKeys[key]) * 2) + c.WriteMapLen(len(db.hashKeys[key])) for _, k := range db.hashFields(key) { c.WriteBulk(k) c.WriteBulk(db.hashGet(key, k)) @@ -285,6 +309,9 @@ func (m *Miniredis) cmdHkeys(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -308,6 +335,40 @@ func (m *Miniredis) cmdHkeys(c *server.Peer, cmd string, args []string) { }) } +// HSTRLEN +func (m *Miniredis) cmdHstrlen(c *server.Peer, cmd string, args []string) { + if len(args) != 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + hash, key := args[0], args[1] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + t, ok := db.keys[hash] + if !ok { + c.WriteInt(0) + return + } + if t != "hash" { + c.WriteError(msgWrongType) + return + } + + keys := db.hashKeys[hash] + c.WriteInt(len(keys[key])) + }) +} + // HVALS func (m *Miniredis) cmdHvals(c *server.Peer, cmd string, args []string) { if len(args) != 1 { @@ -318,6 +379,9 @@ func (m *Miniredis) cmdHvals(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -334,8 +398,9 @@ func (m *Miniredis) cmdHvals(c *server.Peer, cmd string, args []string) { return } - c.WriteLen(len(db.hashKeys[key])) - for _, v := range db.hashKeys[key] { + vals := db.hashValues(key) + c.WriteLen(len(vals)) + for _, v := range vals { c.WriteBulk(v) } }) @@ -351,6 +416,9 @@ func (m *Miniredis) cmdHlen(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -381,6 +449,9 @@ func (m *Miniredis) cmdHmget(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { 
return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -419,6 +490,9 @@ func (m *Miniredis) cmdHincrby(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, field, deltas := args[0], args[1], args[2] @@ -456,10 +530,13 @@ func (m *Miniredis) cmdHincrbyfloat(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, field, deltas := args[0], args[1], args[2] - delta, err := strconv.ParseFloat(deltas, 64) + delta, _, err := big.ParseFloat(deltas, 10, 128, 0) if err != nil { setDirty(c) c.WriteError(msgInvalidFloat) @@ -479,7 +556,7 @@ func (m *Miniredis) cmdHincrbyfloat(c *server.Peer, cmd string, args []string) { c.WriteError(err.Error()) return } - c.WriteBulk(formatFloat(v)) + c.WriteBulk(formatBig(v)) }) } @@ -493,6 +570,9 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] cursor, err := strconv.Atoi(args[1]) @@ -556,7 +636,7 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { members := db.hashFields(key) if withMatch { - members = matchKeys(members, match) + members, _ = matchKeys(members, match) } c.WriteLen(2) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go b/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go new file mode 100644 index 00000000..bd2f90c8 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go @@ -0,0 +1,95 @@ +package miniredis + +import "github.com/alicebob/miniredis/v2/server" + +// commandsHll handles all hll related operations. +func commandsHll(m *Miniredis) { + m.srv.Register("PFADD", m.cmdPfadd) + m.srv.Register("PFCOUNT", m.cmdPfcount) + m.srv.Register("PFMERGE", m.cmdPfmerge) +} + +// PFADD +func (m *Miniredis) cmdPfadd(c *server.Peer, cmd string, args []string) { + if len(args) < 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key, items := args[0], args[1:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if db.exists(key) && db.t(key) != "hll" { + c.WriteError(ErrNotValidHllValue.Error()) + return + } + + altered := db.hllAdd(key, items...) 
+ c.WriteInt(altered) + }) +} + +// PFCOUNT +func (m *Miniredis) cmdPfcount(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + keys := args[:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + count, err := db.hllCount(keys) + if err != nil { + c.WriteError(err.Error()) + return + } + + c.WriteInt(count) + }) +} + +// PFMERGE +func (m *Miniredis) cmdPfmerge(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + keys := args + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if err := db.hllMerge(keys); err != nil { + c.WriteError(err.Error()) + return + } + c.WriteOK() + }) +} diff --git a/vendor/github.com/alicebob/miniredis/cmd_list.go b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go similarity index 88% rename from vendor/github.com/alicebob/miniredis/cmd_list.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_list.go index ae543dc6..364305c2 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_list.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) type leftright int @@ -57,6 +57,10 @@ func (m *Miniredis) cmdBXpop(c *server.Peer, cmd string, args []string, lr leftr if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + timeoutS := args[len(args)-1] keys := args[:len(args)-1] @@ -106,7 +110,7 @@ func (m *Miniredis) cmdBXpop(c *server.Peer, cmd string, args []string, lr leftr }, func(c *server.Peer) { // timeout - c.WriteNull() + c.WriteLen(-1) }, ) } @@ -121,11 +125,14 @@ func (m *Miniredis) cmdLindex(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, offsets := args[0], args[1] offset, err := strconv.Atoi(offsets) - if err != nil { + if err != nil || offsets == "-0" { setDirty(c) c.WriteError(msgInvalidInt) return @@ -167,6 +174,9 @@ func (m *Miniredis) cmdLinsert(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] where := 0 @@ -231,6 +241,9 @@ func (m *Miniredis) cmdLlen(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -263,7 +276,7 @@ func (m *Miniredis) cmdRpop(c *server.Peer, cmd string, args []string) { } func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftright) { - if len(args) != 1 { + if len(args) < 1 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return @@ -271,28 +284,77 @@ func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftri if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } - key := args[0] + var opts struct { + key string + withCount bool + count int + } + + opts.key, args = args[0], args[1:] + if len(args) > 0 { + v, err := strconv.Atoi(args[0]) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + if v < 0 { + setDirty(c) + c.WriteError(msgOutOfRange) + return + } + opts.count = v + opts.withCount = true + args = args[1:] + } + if len(args) > 0 { + setDirty(c) + 
c.WriteError(errWrongNumber(cmd)) + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { // non-existing key is fine c.WriteNull() return } - if db.t(key) != "list" { + if db.t(opts.key) != "list" { c.WriteError(msgWrongType) return } + if opts.withCount { + var popped []string + for opts.count > 0 && len(db.listKeys[opts.key]) > 0 { + switch lr { + case left: + popped = append(popped, db.listLpop(opts.key)) + case right: + popped = append(popped, db.listPop(opts.key)) + } + opts.count -= 1 + } + if len(popped) == 0 { + c.WriteLen(-1) + } else { + c.WriteStrings(popped) + } + return + } + var elem string switch lr { case left: - elem = db.listLpop(key) + elem = db.listLpop(opts.key) case right: - elem = db.listPop(key) + elem = db.listPop(opts.key) } c.WriteBulk(elem) }) @@ -317,6 +379,9 @@ func (m *Miniredis) cmdXpush(c *server.Peer, cmd string, args []string, lr leftr if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, args := args[0], args[1:] @@ -360,6 +425,9 @@ func (m *Miniredis) cmdXpushx(c *server.Peer, cmd string, args []string, lr left if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, args := args[0], args[1:] @@ -398,6 +466,9 @@ func (m *Miniredis) cmdLrange(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] start, err := strconv.Atoi(args[1]) @@ -445,6 +516,9 @@ func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] count, err := strconv.Atoi(args[1]) @@ -514,6 +588,9 @@ func (m *Miniredis) cmdLset(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] index, err := strconv.Atoi(args[1]) @@ -561,6 +638,9 @@ func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] start, err := strconv.Atoi(args[1]) @@ -612,6 +692,9 @@ func (m *Miniredis) cmdRpoplpush(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } src, dst := args[0], args[1] @@ -642,6 +725,9 @@ func (m *Miniredis) cmdBrpoplpush(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } src := args[0] dst := args[1] @@ -681,7 +767,7 @@ func (m *Miniredis) cmdBrpoplpush(c *server.Peer, cmd string, args []string) { }, func(c *server.Peer) { // timeout - c.WriteNull() + c.WriteLen(-1) }, ) } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go b/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go new file mode 100644 index 00000000..70997be5 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go @@ -0,0 +1,256 @@ +// Commands from https://redis.io/commands#pubsub + +package miniredis + +import ( + "fmt" + "strings" + + "github.com/alicebob/miniredis/v2/server" +) + +// commandsPubsub handles all PUB/SUB operations. 
+func commandsPubsub(m *Miniredis) { + m.srv.Register("SUBSCRIBE", m.cmdSubscribe) + m.srv.Register("UNSUBSCRIBE", m.cmdUnsubscribe) + m.srv.Register("PSUBSCRIBE", m.cmdPsubscribe) + m.srv.Register("PUNSUBSCRIBE", m.cmdPunsubscribe) + m.srv.Register("PUBLISH", m.cmdPublish) + m.srv.Register("PUBSUB", m.cmdPubSub) +} + +// SUBSCRIBE +func (m *Miniredis) cmdSubscribe(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + sub := m.subscribedState(c) + for _, channel := range args { + n := sub.Subscribe(channel) + c.Block(func(w *server.Writer) { + w.WritePushLen(3) + w.WriteBulk("subscribe") + w.WriteBulk(channel) + w.WriteInt(n) + }) + } + }) +} + +// UNSUBSCRIBE +func (m *Miniredis) cmdUnsubscribe(c *server.Peer, cmd string, args []string) { + if !m.handleAuth(c) { + return + } + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } + + channels := args + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + sub := m.subscribedState(c) + + if len(channels) == 0 { + channels = sub.Channels() + } + + // there is no de-duplication + for _, channel := range channels { + n := sub.Unsubscribe(channel) + c.Block(func(w *server.Writer) { + w.WritePushLen(3) + w.WriteBulk("unsubscribe") + w.WriteBulk(channel) + w.WriteInt(n) + }) + } + if len(channels) == 0 { + // special case: there is always a reply + c.Block(func(w *server.Writer) { + w.WritePushLen(3) + w.WriteBulk("unsubscribe") + w.WriteNull() + w.WriteInt(0) + }) + } + + if sub.Count() == 0 { + endSubscriber(m, c) + } + }) +} + +// PSUBSCRIBE +func (m *Miniredis) cmdPsubscribe(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + sub := m.subscribedState(c) + for _, pat := range args { + n := sub.Psubscribe(pat) + c.Block(func(w *server.Writer) { + w.WritePushLen(3) + w.WriteBulk("psubscribe") + w.WriteBulk(pat) + w.WriteInt(n) + }) + } + }) +} + +// PUNSUBSCRIBE +func (m *Miniredis) cmdPunsubscribe(c *server.Peer, cmd string, args []string) { + if !m.handleAuth(c) { + return + } + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } + + patterns := args + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + sub := m.subscribedState(c) + + if len(patterns) == 0 { + patterns = sub.Patterns() + } + + // there is no de-duplication + for _, pat := range patterns { + n := sub.Punsubscribe(pat) + c.Block(func(w *server.Writer) { + w.WritePushLen(3) + w.WriteBulk("punsubscribe") + w.WriteBulk(pat) + w.WriteInt(n) + }) + } + if len(patterns) == 0 { + // special case: there is always a reply + c.Block(func(w *server.Writer) { + w.WritePushLen(3) + w.WriteBulk("punsubscribe") + w.WriteNull() + w.WriteInt(0) + }) + } + + if sub.Count() == 0 { + endSubscriber(m, c) + } + }) +} + +// PUBLISH +func (m *Miniredis) cmdPublish(c *server.Peer, cmd string, args []string) { + if len(args) != 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + channel, mesg := args[0], args[1] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + c.WriteInt(m.publish(channel, mesg)) 
+ }) +} + +// PUBSUB +func (m *Miniredis) cmdPubSub(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + if m.checkPubsub(c, cmd) { + return + } + + subcommand := strings.ToUpper(args[0]) + subargs := args[1:] + var argsOk bool + + switch subcommand { + case "CHANNELS": + argsOk = len(subargs) < 2 + case "NUMSUB": + argsOk = true + case "NUMPAT": + argsOk = len(subargs) == 0 + default: + argsOk = false + } + + if !argsOk { + setDirty(c) + c.WriteError(fmt.Sprintf(msgFPubsubUsage, subcommand)) + return + } + + if !m.handleAuth(c) { + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + switch subcommand { + case "CHANNELS": + pat := "" + if len(subargs) == 1 { + pat = subargs[0] + } + + allsubs := m.allSubscribers() + channels := activeChannels(allsubs, pat) + + c.WriteLen(len(channels)) + for _, channel := range channels { + c.WriteBulk(channel) + } + + case "NUMSUB": + subs := m.allSubscribers() + c.WriteLen(len(subargs) * 2) + for _, channel := range subargs { + c.WriteBulk(channel) + c.WriteInt(countSubs(subs, channel)) + } + + case "NUMPAT": + c.WriteInt(countPsubs(m.allSubscribers())) + } + }) +} diff --git a/vendor/github.com/alicebob/miniredis/cmd_scripting.go b/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go similarity index 72% rename from vendor/github.com/alicebob/miniredis/cmd_scripting.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go index 296e61b9..ef10aaef 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_scripting.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go @@ -9,10 +9,10 @@ import ( "strings" luajson "github.com/alicebob/gopher-json" - "github.com/yuin/gopher-lua" + lua "github.com/yuin/gopher-lua" "github.com/yuin/gopher-lua/parse" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) func commandsScripting(m *Miniredis) { @@ -22,7 +22,8 @@ func commandsScripting(m *Miniredis) { } // Execute lua. Needs to run m.Lock()ed, from within withTx(). -func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) { +// Returns true if the lua was OK (and hence should be cached). 
+func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) bool { l := lua.NewState(lua.Options{SkipOpenLibs: true}) defer l.Close() @@ -37,6 +38,7 @@ func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) { {lua.TabLibName, lua.OpenTable}, {lua.StringLibName, lua.OpenString}, {lua.MathLibName, lua.OpenMath}, + {lua.DebugLibName, lua.OpenDebug}, } { if err := l.CallByParam(lua.P{ Fn: l.NewFunction(pair.f), @@ -50,26 +52,21 @@ func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) { luajson.Preload(l) requireGlobal(l, "cjson", "json") - m.Unlock() - conn := m.redigo() - m.Lock() - defer conn.Close() - // set global variable KEYS keysTable := l.NewTable() keysS, args := args[0], args[1:] keysLen, err := strconv.Atoi(keysS) if err != nil { c.WriteError(msgInvalidInt) - return + return false } if keysLen < 0 { c.WriteError(msgNegativeKeysNumber) - return + return false } if keysLen > len(args) { c.WriteError(msgInvalidKeysNumber) - return + return false } keys, args := args[:keysLen], args[keysLen:] for i, k := range keys { @@ -83,25 +80,29 @@ func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) { } l.SetGlobal("ARGV", argvTable) - redisFuncs := mkLuaFuncs(conn) + redisFuncs, redisConstants := mkLua(m.srv, c) // Register command handlers l.Push(l.NewFunction(func(l *lua.LState) int { mod := l.RegisterModule("redis", redisFuncs).(*lua.LTable) + for k, v := range redisConstants { + mod.RawSetString(k, v) + } l.Push(mod) return 1 })) + l.DoString(protectGlobals) + l.Push(lua.LString("redis")) l.Call(1, 0) - m.Unlock() // This runs in a transaction, but can access our db recursively - defer m.Lock() if err := l.DoString(script); err != nil { c.WriteError(errLuaParseError(err)) - return + return false } luaToRedis(l, c, l.Get(1)) + return true } func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) { @@ -113,10 +114,23 @@ func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } + script, args := args[0], args[1:] withTx(m, c, func(c *server.Peer, ctx *connCtx) { - m.runLuaScript(c, script, args) + ok := m.runLuaScript(c, script, args) + if ok { + sha := sha1Hex(script) + m.scripts[sha] = script + } }) } @@ -129,6 +143,13 @@ func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } sha, args := args[0], args[1:] @@ -152,6 +173,14 @@ func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } + + if getCtx(c).nested { + c.WriteError(msgNotFromScripts) + return + } subcmd, args := args[0], args[1:] @@ -183,8 +212,15 @@ func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) { } case "flush": + if len(args) == 1 { + switch strings.ToUpper(args[0]) { + case "SYNC", "ASYNC": + args = args[1:] + default: + } + } if len(args) != 0 { - c.WriteError(fmt.Sprintf(msgFScriptUsage, "FLUSH")) + c.WriteError(msgScriptFlush) return } @@ -218,3 +254,28 @@ func requireGlobal(l *lua.LState, id, modName string) { l.SetGlobal(id, mod) } + +// the following script protects globals +// it is based on: http://metalua.luaforge.net/src/lib/strict.lua.html +var protectGlobals = ` +local 
dbg=debug +local mt = {} +setmetatable(_G, mt) +mt.__newindex = function (t, n, v) + if dbg.getinfo(2) then + local w = dbg.getinfo(2, "S").what + if w ~= "C" then + error("Script attempted to create global variable '"..tostring(n).."'", 2) + end + end + rawset(t, n, v) +end +mt.__index = function (t, n) + if dbg.getinfo(2) and dbg.getinfo(2, "S").what ~= "C" then + error("Script attempted to access nonexistent global variable '"..tostring(n).."'", 2) + end + return rawget(t, n) +end +debug = nil + +` diff --git a/vendor/github.com/alicebob/miniredis/cmd_server.go b/vendor/github.com/alicebob/miniredis/v2/cmd_server.go similarity index 88% rename from vendor/github.com/alicebob/miniredis/cmd_server.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_server.go index c021644c..223651d3 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_server.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_server.go @@ -5,9 +5,8 @@ package miniredis import ( "strconv" "strings" - "time" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) func commandsServer(m *Miniredis) { @@ -27,6 +26,9 @@ func (m *Miniredis) cmdDbsize(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -45,10 +47,12 @@ func (m *Miniredis) cmdFlushall(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { m.flushAll() @@ -66,10 +70,12 @@ func (m *Miniredis) cmdFlushdb(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { m.db(ctx.selectedDB).flush() @@ -77,7 +83,7 @@ func (m *Miniredis) cmdFlushdb(c *server.Peer, cmd string, args []string) { }) } -// TIME: time values are returned in string format instead of int +// TIME func (m *Miniredis) cmdTime(c *server.Peer, cmd string, args []string) { if len(args) > 0 { setDirty(c) @@ -87,12 +93,12 @@ func (m *Miniredis) cmdTime(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - now := m.now - if now.IsZero() { - now = time.Now() - } + now := m.effectiveNow() nanos := now.UnixNano() seconds := nanos / 1000000000 microseconds := (nanos / 1000) % 1000000 diff --git a/vendor/github.com/alicebob/miniredis/cmd_set.go b/vendor/github.com/alicebob/miniredis/v2/cmd_set.go similarity index 86% rename from vendor/github.com/alicebob/miniredis/cmd_set.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_set.go index 2220cf55..a9cdf411 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_set.go @@ -3,11 +3,10 @@ package miniredis import ( - "math/rand" "strconv" "strings" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) // commandsSet handles all set value operations. 
@@ -39,6 +38,9 @@ func (m *Miniredis) cmdSadd(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, elems := args[0], args[1:] @@ -65,6 +67,9 @@ func (m *Miniredis) cmdScard(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -96,6 +101,9 @@ func (m *Miniredis) cmdSdiff(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } keys := args @@ -108,7 +116,7 @@ func (m *Miniredis) cmdSdiff(c *server.Peer, cmd string, args []string) { return } - c.WriteLen(len(set)) + c.WriteSetLen(len(set)) for k := range set { c.WriteBulk(k) } @@ -125,6 +133,9 @@ func (m *Miniredis) cmdSdiffstore(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } dest, keys := args[0], args[1:] @@ -153,6 +164,9 @@ func (m *Miniredis) cmdSinter(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } keys := args @@ -182,6 +196,9 @@ func (m *Miniredis) cmdSinterstore(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } dest, keys := args[0], args[1:] @@ -210,6 +227,9 @@ func (m *Miniredis) cmdSismember(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, value := args[0], args[1] @@ -244,6 +264,9 @@ func (m *Miniredis) cmdSmembers(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -251,7 +274,7 @@ func (m *Miniredis) cmdSmembers(c *server.Peer, cmd string, args []string) { db := m.db(ctx.selectedDB) if !db.exists(key) { - c.WriteLen(0) + c.WriteSetLen(0) return } @@ -262,7 +285,7 @@ func (m *Miniredis) cmdSmembers(c *server.Peer, cmd string, args []string) { members := db.setMembers(key) - c.WriteLen(len(members)) + c.WriteSetLen(len(members)) for _, elem := range members { c.WriteBulk(elem) } @@ -279,6 +302,9 @@ func (m *Miniredis) cmdSmove(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } src, dst, member := args[0], args[1], args[2] @@ -320,33 +346,46 @@ func (m *Miniredis) cmdSpop(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } - key, args := args[0], args[1:] - - withTx(m, c, func(c *server.Peer, ctx *connCtx) { - db := m.db(ctx.selectedDB) + opts := struct { + key string + withCount bool + count int + }{ + count: 1, + } + opts.key, args = args[0], args[1:] - withCount := false - count := 1 - if len(args) > 0 { - v, err := strconv.Atoi(args[0]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) - return - } - count = v - withCount = true - args = args[1:] - } - if len(args) > 0 { + if len(args) > 0 { + v, err := strconv.Atoi(args[0]) + if err != nil { setDirty(c) c.WriteError(msgInvalidInt) return } + if v < 0 { + setDirty(c) + c.WriteError(msgOutOfRange) + return + } + opts.count = v + opts.withCount = true + args = args[1:] + } + if len(args) > 0 { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } - if !db.exists(key) { - if !withCount { + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if !db.exists(opts.key) { + if !opts.withCount { c.WriteNull() return } @@ -354,23 +393,23 @@ 
func (m *Miniredis) cmdSpop(c *server.Peer, cmd string, args []string) { return } - if db.t(key) != "set" { + if db.t(opts.key) != "set" { c.WriteError(ErrWrongType.Error()) return } var deleted []string - for i := 0; i < count; i++ { - members := db.setMembers(key) + for i := 0; i < opts.count; i++ { + members := db.setMembers(opts.key) if len(members) == 0 { break } - member := members[rand.Intn(len(members))] - db.setRem(key, member) + member := members[m.randIntn(len(members))] + db.setRem(opts.key, member) deleted = append(deleted, member) } - // without `count` return a single value... - if !withCount { + // without `count` return a single value + if !opts.withCount { if len(deleted) == 0 { c.WriteNull() return @@ -378,7 +417,7 @@ func (m *Miniredis) cmdSpop(c *server.Peer, cmd string, args []string) { c.WriteBulk(deleted[0]) return } - // ... with `count` return a list + // with `count` return a list c.WriteLen(len(deleted)) for _, v := range deleted { c.WriteBulk(v) @@ -401,6 +440,9 @@ func (m *Miniredis) cmdSrandmember(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] count := 0 @@ -434,7 +476,7 @@ func (m *Miniredis) cmdSrandmember(c *server.Peer, cmd string, args []string) { // Non-unique elements is allowed with negative count. c.WriteLen(-count) for count != 0 { - member := members[rand.Intn(len(members))] + member := members[m.randIntn(len(members))] c.WriteBulk(member) count++ } @@ -442,7 +484,7 @@ func (m *Miniredis) cmdSrandmember(c *server.Peer, cmd string, args []string) { } // Must be unique elements. - shuffle(members) + m.shuffle(members) if count > len(members) { count = len(members) } @@ -467,6 +509,9 @@ func (m *Miniredis) cmdSrem(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, fields := args[0], args[1:] @@ -497,6 +542,9 @@ func (m *Miniredis) cmdSunion(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } keys := args @@ -526,6 +574,9 @@ func (m *Miniredis) cmdSunionstore(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } dest, keys := args[0], args[1:] @@ -554,6 +605,9 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] cursor, err := strconv.Atoi(args[1]) @@ -617,7 +671,7 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { members := db.setMembers(key) if withMatch { - members = matchKeys(members, match) + members, _ = matchKeys(members, match) } c.WriteLen(2) @@ -628,12 +682,3 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { } }) } - -// shuffle shuffles a string. Kinda. 
-func shuffle(m []string) { - for _ = range m { - i := rand.Intn(len(m)) - j := rand.Intn(len(m)) - m[i], m[j] = m[j], m[i] - } -} diff --git a/vendor/github.com/alicebob/miniredis/cmd_sorted_set.go b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go similarity index 87% rename from vendor/github.com/alicebob/miniredis/cmd_sorted_set.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go index 5252b015..59b18200 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_sorted_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) var ( @@ -40,6 +40,7 @@ func commandsSortedSet(m *Miniredis) { m.srv.Register("ZSCAN", m.cmdZscan) m.srv.Register("ZPOPMAX", m.cmdZpopmax(true)) m.srv.Register("ZPOPMIN", m.cmdZpopmax(false)) + m.srv.Register("ZRANDMEMBER", m.cmdZrandmember) } // ZADD @@ -52,6 +53,9 @@ func (m *Miniredis) cmdZadd(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, args := args[0], args[1:] var ( @@ -133,7 +137,7 @@ outer: return } newScore := db.ssetIncrby(key, member, delta) - c.WriteBulk(formatFloat(newScore)) + c.WriteFloat(newScore) } return } @@ -170,6 +174,9 @@ func (m *Miniredis) cmdZcard(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -200,6 +207,9 @@ func (m *Miniredis) cmdZcount(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] min, minIncl, err := parseFloatRange(args[1]) @@ -244,6 +254,9 @@ func (m *Miniredis) cmdZincrby(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] delta, err := strconv.ParseFloat(args[1], 64) @@ -262,7 +275,7 @@ func (m *Miniredis) cmdZincrby(c *server.Peer, cmd string, args []string) { return } newScore := db.ssetIncrby(key, member, delta) - c.WriteBulk(formatFloat(newScore)) + c.WriteFloat(newScore) }) } @@ -276,6 +289,9 @@ func (m *Miniredis) cmdZinterstore(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } destination := args[0] numKeys, err := strconv.Atoi(args[1]) @@ -354,33 +370,42 @@ func (m *Miniredis) cmdZinterstore(c *server.Peer, cmd string, args []string) { if !db.exists(key) { continue } - if db.t(key) != "zset" { + + var set map[string]float64 + switch db.t(key) { + case "set": + set = map[string]float64{} + for elem := range db.setKeys[key] { + set[elem] = 1.0 + } + case "zset": + set = db.sortedSet(key) + default: c.WriteError(msgWrongType) return } - for _, el := range db.ssetElements(key) { - score := el.score + for member, score := range set { if withWeights { score *= weights[i] } - counts[el.member]++ - old, ok := sset[el.member] + counts[member]++ + old, ok := sset[member] if !ok { - sset[el.member] = score + sset[member] = score continue } switch aggregate { default: panic("Invalid aggregate") case "sum": - sset[el.member] += score + sset[member] += score case "min": if score < old { - sset[el.member] = score + sset[member] = score } case "max": if score > old { - sset[el.member] = score + sset[member] = score } } } @@ -405,6 +430,9 @@ func (m *Miniredis) cmdZlexcount(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + 
return + } key := args[0] min, minIncl, err := parseLexrange(args[1]) @@ -453,6 +481,9 @@ func (m *Miniredis) makeCmdZrange(reverse bool) server.Cmd { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] start, err := strconv.Atoi(args[1]) @@ -508,7 +539,7 @@ func (m *Miniredis) makeCmdZrange(reverse bool) server.Cmd { for _, el := range members[rs:re] { c.WriteBulk(el) if withScores { - c.WriteBulk(formatFloat(db.ssetScore(key, el))) + c.WriteFloat(db.ssetScore(key, el)) } } }) @@ -526,6 +557,9 @@ func (m *Miniredis) makeCmdZrangebylex(reverse bool) server.Cmd { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] min, minIncl, err := parseLexrange(args[1]) @@ -637,6 +671,9 @@ func (m *Miniredis) makeCmdZrangebyscore(reverse bool) server.Cmd { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] min, minIncl, err := parseFloatRange(args[1]) @@ -740,7 +777,7 @@ func (m *Miniredis) makeCmdZrangebyscore(reverse bool) server.Cmd { for _, el := range members { c.WriteBulk(el.member) if withScores { - c.WriteBulk(formatFloat(el.score)) + c.WriteFloat(el.score) } } }) @@ -758,6 +795,9 @@ func (m *Miniredis) makeCmdZrank(reverse bool) server.Cmd { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, member := args[0], args[1] @@ -798,6 +838,9 @@ func (m *Miniredis) cmdZrem(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, members := args[0], args[1:] @@ -834,6 +877,9 @@ func (m *Miniredis) cmdZremrangebylex(c *server.Peer, cmd string, args []string) if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] min, minIncl, err := parseLexrange(args[1]) @@ -884,6 +930,9 @@ func (m *Miniredis) cmdZremrangebyrank(c *server.Peer, cmd string, args []string if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] start, err := strconv.Atoi(args[1]) @@ -931,6 +980,9 @@ func (m *Miniredis) cmdZremrangebyscore(c *server.Peer, cmd string, args []strin if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] min, minIncl, err := parseFloatRange(args[1]) @@ -979,6 +1031,9 @@ func (m *Miniredis) cmdZscore(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, member := args[0], args[1] @@ -1000,7 +1055,7 @@ func (m *Miniredis) cmdZscore(c *server.Peer, cmd string, args []string) { return } - c.WriteBulk(formatFloat(db.ssetScore(key, member))) + c.WriteFloat(db.ssetScore(key, member)) }) } @@ -1080,10 +1135,12 @@ func withLexRange(members []string, min string, minIncl bool, max string, maxInc return nil } if min != "-" { + found := false if minIncl { for i, m := range members { if m >= min { members = members[i:] + found = true break } } @@ -1092,10 +1149,14 @@ func withLexRange(members []string, min string, minIncl bool, max string, maxInc for i, m := range members { if m > min { members = members[i:] + found = true break } } } + if !found { + return nil + } } if max != "+" { if maxIncl { @@ -1128,6 +1189,9 @@ func (m *Miniredis) cmdZunionstore(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } destination := args[0] numKeys, err := strconv.Atoi(args[1]) @@ -1211,32 +1275,42 @@ func (m *Miniredis) cmdZunionstore(c *server.Peer, cmd string, args []string) { if !db.exists(key) { continue } - 
if db.t(key) != "zset" { + + var set map[string]float64 + switch db.t(key) { + case "set": + set = map[string]float64{} + for elem := range db.setKeys[key] { + set[elem] = 1.0 + } + case "zset": + set = db.sortedSet(key) + default: c.WriteError(msgWrongType) return } - for _, el := range db.ssetElements(key) { - score := el.score + + for member, score := range set { if withWeights { score *= weights[i] } - old, ok := sset[el.member] + old, ok := sset[member] if !ok { - sset[el.member] = score + sset[member] = score continue } switch aggregate { default: panic("Invalid aggregate") case "sum": - sset[el.member] += score + sset[member] += score case "min": if score < old { - sset[el.member] = score + sset[member] = score } case "max": if score > old { - sset[el.member] = score + sset[member] = score } } } @@ -1256,6 +1330,9 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] cursor, err := strconv.Atoi(args[1]) @@ -1303,8 +1380,7 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - // We return _all_ (matched) keys every time. - + // Paging is not implementend, all results are returned for cursor 0. if cursor != 0 { // Invalid cursor. c.WriteLen(2) @@ -1319,7 +1395,7 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { members := db.ssetMembers(key) if withMatch { - members = matchKeys(members, match) + members, _ = matchKeys(members, match) } c.WriteLen(2) @@ -1328,7 +1404,7 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { c.WriteLen(len(members) * 2) for _, k := range members { c.WriteBulk(k) - c.WriteBulk(formatFloat(db.ssetScore(key, k))) + c.WriteFloat(db.ssetScore(key, k)) } }) } @@ -1390,10 +1466,111 @@ func (m *Miniredis) cmdZpopmax(reverse bool) server.Cmd { for _, el := range members[rs:re] { c.WriteBulk(el) if withScores { - c.WriteBulk(formatFloat(db.ssetScore(key, el))) + c.WriteFloat(db.ssetScore(key, el)) } db.ssetRem(key, el) } }) } } + +// ZRANDMEMBER +func (m *Miniredis) cmdZrandmember(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + var opts struct { + key string + withCount bool + count int + withScores bool + } + + opts.key = args[0] + args = args[1:] + + if len(args) > 0 { + count := args[0] + args = args[1:] + + n, err := strconv.Atoi(count) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + opts.withCount = true + opts.count = n // can be negative + } + + if len(args) > 0 && strings.ToUpper(args[0]) == "WITHSCORES" { + opts.withScores = true + args = args[1:] + } + + if len(args) > 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if !db.exists(opts.key) { + c.WriteNull() + return + } + + if db.t(opts.key) != "zset" { + c.WriteError(ErrWrongType.Error()) + return + } + + if !opts.withCount { + member := db.ssetRandomMember(opts.key) + if member == "" { + c.WriteNull() + return + } + c.WriteBulk(member) + return + } + + var members []string + switch { + case opts.count == 0: + c.WriteStrings(nil) + return + case opts.count > 0: + allMembers := db.ssetMembers(opts.key) + db.master.shuffle(allMembers) + if len(allMembers) 
> opts.count { + allMembers = allMembers[:opts.count] + } + members = allMembers + case opts.count < 0: + for i := 0; i < -opts.count; i++ { + members = append(members, db.ssetRandomMember(opts.key)) + } + } + if opts.withScores { + c.WriteLen(len(members) * 2) + for _, m := range members { + c.WriteBulk(m) + c.WriteFloat(db.ssetScore(opts.key, m)) + } + return + } + c.WriteStrings(members) + }) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go new file mode 100644 index 00000000..a1af8da9 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go @@ -0,0 +1,1082 @@ +// Commands from https://redis.io/commands#stream + +package miniredis + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/alicebob/miniredis/v2/server" +) + +// commandsStream handles all stream operations. +func commandsStream(m *Miniredis) { + m.srv.Register("XADD", m.cmdXadd) + m.srv.Register("XLEN", m.cmdXlen) + m.srv.Register("XREAD", m.cmdXread) + m.srv.Register("XRANGE", m.makeCmdXrange(false)) + m.srv.Register("XREVRANGE", m.makeCmdXrange(true)) + m.srv.Register("XGROUP", m.cmdXgroup) + m.srv.Register("XINFO", m.cmdXinfo) + m.srv.Register("XREADGROUP", m.cmdXreadgroup) + m.srv.Register("XACK", m.cmdXack) + m.srv.Register("XDEL", m.cmdXdel) + m.srv.Register("XPENDING", m.cmdXpending) + m.srv.Register("XTRIM", m.cmdXtrim) +} + +// XADD +func (m *Miniredis) cmdXadd(c *server.Peer, cmd string, args []string) { + if len(args) < 4 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key, args := args[0], args[1:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + + maxlen := -1 + if strings.ToLower(args[0]) == "maxlen" { + args = args[1:] + // we don't treat "~" special + if args[0] == "~" { + args = args[1:] + } + n, err := strconv.Atoi(args[0]) + if err != nil { + c.WriteError(msgInvalidInt) + return + } + if n < 0 { + c.WriteError("ERR The MAXLEN argument must be >= 0.") + return + } + maxlen = n + args = args[1:] + } + if len(args) < 1 { + c.WriteError(errWrongNumber(cmd)) + return + } + entryID, args := args[0], args[1:] + + // args must be composed of field/value pairs. + if len(args) == 0 || len(args)%2 != 0 { + c.WriteError("ERR wrong number of arguments for XADD") // non-default message + return + } + + var values []string + for len(args) > 0 { + values = append(values, args[0], args[1]) + args = args[2:] + } + + db := m.db(ctx.selectedDB) + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + // TODO: NOMKSTREAM + s, _ = db.newStream(key) + } + + newID, err := s.add(entryID, values, m.effectiveNow()) + if err != nil { + switch err { + case errInvalidEntryID: + c.WriteError(msgInvalidStreamID) + default: + c.WriteError(err.Error()) + } + return + } + if maxlen >= 0 { + s.trim(maxlen) + } + db.keyVersion[key]++ + + c.WriteBulk(newID) + }) +} + +// XLEN +func (m *Miniredis) cmdXlen(c *server.Peer, cmd string, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key := args[0] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + } + if s == nil { + // No such key. That's zero length. 
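The ZRANDMEMBER handler added above follows the Redis 6.2 semantics: without a count it returns one member (or a nil reply for a missing key), a count of 0 returns an empty array, a positive count returns at most that many distinct members, a negative count may repeat members, and WITHSCORES interleaves the scores. A minimal test sketch of that behaviour; the redigo client and all key/test names are illustrative assumptions, not part of this patch:

```go
package example

import (
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/gomodule/redigo/redis"
)

func TestZRandMember(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c, err := redis.Dial("tcp", s.Addr())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// Seed a sorted set with three members.
	if _, err := c.Do("ZADD", "board", 1, "a", 2, "b", 3, "c"); err != nil {
		t.Fatal(err)
	}

	// A positive count returns at most that many distinct members.
	members, err := redis.Strings(c.Do("ZRANDMEMBER", "board", 2))
	if err != nil {
		t.Fatal(err)
	}
	if len(members) != 2 {
		t.Fatalf("want 2 distinct members, got %d", len(members))
	}

	// A negative count may repeat members, so 5 results come back from 3 elements.
	repeated, err := redis.Strings(c.Do("ZRANDMEMBER", "board", -5))
	if err != nil {
		t.Fatal(err)
	}
	if len(repeated) != 5 {
		t.Fatalf("want 5 members, got %d", len(repeated))
	}
}
```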
+ c.WriteInt(0) + return + } + + c.WriteInt(len(s.entries)) + }) +} + +// XRANGE and XREVRANGE +func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { + return func(c *server.Peer, cmd string, args []string) { + if len(args) < 3 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if len(args) == 4 || len(args) > 5 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + var ( + key = args[0] + startKey = args[1] + endKey = args[2] + startExclusive bool + endExclusive bool + ) + if strings.HasPrefix(startKey, "(") { + startExclusive = true + startKey = startKey[1:] + if startKey == "-" || startKey == "+" { + setDirty(c) + c.WriteError(msgInvalidStreamID) + return + } + } + if strings.HasPrefix(endKey, "(") { + endExclusive = true + endKey = endKey[1:] + if endKey == "-" || endKey == "+" { + setDirty(c) + c.WriteError(msgInvalidStreamID) + return + } + } + + countArg := "0" + if len(args) == 5 { + if strings.ToLower(args[3]) != "count" { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + countArg = args[4] + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + start, err := formatStreamRangeBound(startKey, true, reverse) + if err != nil { + c.WriteError(msgInvalidStreamID) + return + } + end, err := formatStreamRangeBound(endKey, false, reverse) + if err != nil { + c.WriteError(msgInvalidStreamID) + return + } + count, err := strconv.Atoi(countArg) + if err != nil { + c.WriteError(msgInvalidInt) + return + } + + db := m.db(ctx.selectedDB) + + if !db.exists(key) { + c.WriteLen(0) + return + } + + if db.t(key) != "stream" { + c.WriteError(ErrWrongType.Error()) + return + } + + var entries = db.streamKeys[key].entries + if reverse { + entries = reversedStreamEntries(entries) + } + if count == 0 { + count = len(entries) + } + + var returnedEntries []StreamEntry + for _, entry := range entries { + if len(returnedEntries) == count { + break + } + + if !reverse { + // Break if entry ID > end + if streamCmp(entry.ID, end) == 1 { + break + } + + // Continue if entry ID < start + if streamCmp(entry.ID, start) == -1 { + continue + } + } else { + // Break if entry iD < end + if streamCmp(entry.ID, end) == -1 { + break + } + + // Continue if entry ID > start. 
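cmd_stream.go is entirely new in this vendored copy: XADD appends entries (honouring MAXLEN), XLEN reports the entry count, and XRANGE/XREVRANGE walk entries between two IDs with an optional COUNT. A small usage sketch under the same assumptions as the ZRANDMEMBER example (redigo as the driving client, hypothetical names):

```go
// package and imports as in the ZRANDMEMBER sketch above
func TestStreamBasics(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c, err := redis.Dial("tcp", s.Addr())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// XADD with "*" lets the server pick the entry ID.
	id, err := redis.String(c.Do("XADD", "events", "*", "type", "login", "user", "alice"))
	if err != nil {
		t.Fatal(err)
	}

	// XLEN counts the entries in the stream.
	n, err := redis.Int(c.Do("XLEN", "events"))
	if err != nil {
		t.Fatal(err)
	}
	if n != 1 {
		t.Fatalf("want 1 entry, got %d", n)
	}

	// XRANGE - + returns every entry as [ID, [field, value, ...]].
	entries, err := redis.Values(c.Do("XRANGE", "events", "-", "+"))
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 1 {
		t.Fatalf("want the single entry %s back, got %d entries", id, len(entries))
	}
}
```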
+ if streamCmp(entry.ID, start) == 1 { + continue + } + } + + // Continue if start exclusive and entry ID == start + if startExclusive && streamCmp(entry.ID, start) == 0 { + continue + } + // Continue if end exclusive and entry ID == end + if endExclusive && streamCmp(entry.ID, end) == 0 { + continue + } + + returnedEntries = append(returnedEntries, entry) + } + + c.WriteLen(len(returnedEntries)) + for _, entry := range returnedEntries { + c.WriteLen(2) + c.WriteBulk(entry.ID) + c.WriteLen(len(entry.Values)) + for _, v := range entry.Values { + c.WriteBulk(v) + } + } + }) + } +} + +// XGROUP +func (m *Miniredis) cmdXgroup(c *server.Peer, cmd string, args []string) { + if (len(args) == 4 || len(args) == 5) && strings.ToUpper(args[0]) == "CREATE" { + m.cmdXgroupCreate(c, cmd, args) + } else { + j := strings.Join(args, " ") + err := fmt.Sprintf("ERR 'XGROUP %s' not supported", j) + setDirty(c) + c.WriteError(err) + } +} + +// XGROUP CREATE +func (m *Miniredis) cmdXgroupCreate(c *server.Peer, cmd string, args []string) { + stream, group, id := args[1], args[2], args[3] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(stream) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil && len(args) == 5 && strings.ToUpper(args[4]) == "MKSTREAM" { + if s, err = db.newStream(stream); err != nil { + c.WriteError(err.Error()) + return + } + } + if s == nil { + c.WriteError(msgXgroupKeyNotFound) + return + } + + if err := s.createGroup(group, id); err != nil { + c.WriteError(err.Error()) + return + } + + c.WriteOK() + }) +} + +// XINFO +func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + subCmd, args := strings.ToUpper(args[0]), args[1:] + switch subCmd { + case "STREAM": + m.cmdXinfoStream(c, args) + case "CONSUMERS", "GROUPS", "HELP": + err := fmt.Sprintf("'XINFO %s' not supported", strings.Join(args, " ")) + setDirty(c) + c.WriteError(err) + default: + setDirty(c) + c.WriteError(fmt.Sprintf( + "ERR Unknown subcommand or wrong number of arguments for '%s'. 
Try XINFO HELP.", + subCmd, + )) + } + +} + +// XINFO STREAM +// Produces only part of full command output +func (m *Miniredis) cmdXinfoStream(c *server.Peer, args []string) { + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber("XINFO")) + return + } + key := args[0] + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgKeyNotFound) + return + } + + c.WriteMapLen(1) + c.WriteBulk("length") + c.WriteInt(len(s.entries)) + }) +} + +// XREADGROUP +func (m *Miniredis) cmdXreadgroup(c *server.Peer, cmd string, args []string) { + // XREADGROUP GROUP group consumer STREAMS key ID + if len(args) < 6 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + var opts struct { + group string + consumer string + count int + noack bool + streams []string + ids []string + block bool + blockTimeout time.Duration + } + + if strings.ToUpper(args[0]) != "GROUP" { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + + opts.group, opts.consumer, args = args[1], args[2], args[3:] + + var err error +parsing: + for len(args) > 0 { + switch strings.ToUpper(args[0]) { + case "COUNT": + if len(args) < 2 { + err = errors.New(errWrongNumber(cmd)) + break parsing + } + + opts.count, err = strconv.Atoi(args[1]) + if err != nil { + break parsing + } + + args = args[2:] + case "BLOCK": + err = parseBlock(cmd, args, &opts.block, &opts.blockTimeout) + if err != nil { + break parsing + } + args = args[2:] + case "NOACK": + args = args[1:] + opts.noack = true + case "STREAMS": + args = args[1:] + + if len(args)%2 != 0 { + err = errors.New(msgXreadUnbalanced) + break parsing + } + + opts.streams, opts.ids = args[0:len(args)/2], args[len(args)/2:] + break parsing + default: + err = fmt.Errorf("ERR incorrect argument %s", args[0]) + break parsing + } + } + + if err != nil { + setDirty(c) + c.WriteError(err.Error()) + return + } + + if len(opts.streams) == 0 || len(opts.ids) == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + for _, id := range opts.ids { + if id != `>` { + opts.block = false + } + } + + if !opts.block { + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + res, err := xreadgroup( + db, + opts.group, + opts.consumer, + opts.noack, + opts.streams, + opts.ids, + opts.count, + m.effectiveNow(), + ) + if err != nil { + c.WriteError(err.Error()) + return + } + writeXread(c, opts.streams, res) + }) + return + } + + blocking( + m, + c, + opts.blockTimeout, + func(c *server.Peer, ctx *connCtx) bool { + db := m.db(ctx.selectedDB) + res, err := xreadgroup( + db, + opts.group, + opts.consumer, + opts.noack, + opts.streams, + opts.ids, + opts.count, + m.effectiveNow(), + ) + if err != nil { + c.WriteError(err.Error()) + return true + } + if len(res) == 0 { + return false + } + writeXread(c, opts.streams, res) + return true + }, + func(c *server.Peer) { // timeout + c.WriteLen(-1) + }, + ) +} + +func xreadgroup( + db *RedisDB, + group, + consumer string, + noack bool, + streams []string, + ids []string, + count int, + now time.Time, +) (map[string][]StreamEntry, error) { + res := map[string][]StreamEntry{} + for i, key := range streams { + id := ids[i] + + g, err := db.streamGroup(key, group) + if err != nil { + return nil, err + } + if g == nil { + return nil, errXreadgroup(key, group) + } + + if _, err := parseStreamID(id); id != `>` && err != nil { + return nil, err + } + entries := 
g.readGroup(now, consumer, id, count, noack) + if id == `>` && len(entries) == 0 { + continue + } + + res[key] = entries + } + return res, nil +} + +// XACK +func (m *Miniredis) cmdXack(c *server.Peer, cmd string, args []string) { + if len(args) < 3 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + key, group, ids := args[0], args[1], args[2:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + g, err := db.streamGroup(key, group) + if err != nil { + c.WriteError(err.Error()) + return + } + if g == nil { + c.WriteInt(0) + return + } + + cnt, err := g.ack(ids) + if err != nil { + c.WriteError(err.Error()) + return + } + c.WriteInt(cnt) + }) +} + +// XDEL +func (m *Miniredis) cmdXdel(c *server.Peer, cmd string, args []string) { + if len(args) < 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + stream, ids := args[0], args[1:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + s, err := db.stream(stream) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteInt(0) + return + } + + n, err := s.delete(ids) + if err != nil { + c.WriteError(err.Error()) + return + } + db.keyVersion[stream]++ + c.WriteInt(n) + }) +} + +// XREAD +func (m *Miniredis) cmdXread(c *server.Peer, cmd string, args []string) { + if len(args) < 3 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + var opts struct { + count int + streams []string + ids []string + block bool + blockTimeout time.Duration + } + var err error + +parsing: + for len(args) > 0 { + switch strings.ToUpper(args[0]) { + case "COUNT": + if len(args) < 2 { + err = errors.New(errWrongNumber(cmd)) + break parsing + } + + opts.count, err = strconv.Atoi(args[1]) + if err != nil { + break parsing + } + args = args[2:] + case "BLOCK": + err = parseBlock(cmd, args, &opts.block, &opts.blockTimeout) + if err != nil { + break parsing + } + args = args[2:] + case "STREAMS": + args = args[1:] + + if len(args)%2 != 0 { + err = errors.New(msgXreadUnbalanced) + break parsing + } + + opts.streams, opts.ids = args[0:len(args)/2], args[len(args)/2:] + for _, id := range opts.ids { + if _, err := parseStreamID(id); id != `$` && err != nil { + setDirty(c) + c.WriteError(msgInvalidStreamID) + return + } + } + args = nil + break parsing + default: + err = fmt.Errorf("ERR incorrect argument %s", args[0]) + break parsing + } + } + + if err != nil { + setDirty(c) + c.WriteError(err.Error()) + return + } + + if !opts.block { + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + res := xread(db, opts.streams, opts.ids, opts.count) + writeXread(c, opts.streams, res) + }) + return + } + blocking( + m, + c, + opts.blockTimeout, + func(c *server.Peer, ctx *connCtx) bool { + db := m.db(ctx.selectedDB) + res := xread(db, opts.streams, opts.ids, opts.count) + if len(res) == 0 { + return false + } + writeXread(c, opts.streams, res) + return true + }, + func(c *server.Peer) { // timeout + c.WriteLen(-1) + }, + ) +} + +func xread(db *RedisDB, streams []string, ids []string, count int) map[string][]StreamEntry { + res := map[string][]StreamEntry{} + for i := range streams { + stream := streams[i] + id := ids[i] + + var s, ok = db.streamKeys[stream] + if !ok { + continue + } + entries := s.entries + if len(entries) == 0 { + continue + } + + entryCount := count + if entryCount == 0 { + entryCount = len(entries) + } + + var returnedEntries []StreamEntry + for _, entry := range entries { + if len(returnedEntries) == 
entryCount { + break + } + if id == "$" { + id = s.lastID() + } + if streamCmp(entry.ID, id) <= 0 { + continue + } + returnedEntries = append(returnedEntries, entry) + } + if len(returnedEntries) > 0 { + res[stream] = returnedEntries + } + } + return res +} + +func writeXread(c *server.Peer, streams []string, res map[string][]StreamEntry) { + if len(res) == 0 { + c.WriteLen(-1) + return + } + c.WriteLen(len(res)) + for _, stream := range streams { + entries, ok := res[stream] + if !ok { + continue + } + c.WriteLen(2) + c.WriteBulk(stream) + c.WriteLen(len(entries)) + for _, entry := range entries { + c.WriteLen(2) + c.WriteBulk(entry.ID) + c.WriteLen(len(entry.Values)) + for _, v := range entry.Values { + c.WriteBulk(v) + } + } + } +} + +// XPENDING +func (m *Miniredis) cmdXpending(c *server.Peer, cmd string, args []string) { + if len(args) < 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + key, group, args := args[0], args[1], args[2:] + summary := true + if len(args) > 0 && strings.ToUpper(args[0]) == "IDLE" { + setDirty(c) + c.WriteError("ERR IDLE is unsupported") + return + } + var ( + start, end string + count int + consumer *string + ) + if len(args) >= 3 { + summary = false + + start_, err := formatStreamRangeBound(args[0], true, false) + if err != nil { + c.WriteError(msgInvalidStreamID) + return + } + start = start_ + end_, err := formatStreamRangeBound(args[1], false, false) + if err != nil { + c.WriteError(msgInvalidStreamID) + return + } + end = end_ + n, err := strconv.Atoi(args[2]) // negative is allowed + if err != nil { + c.WriteError(msgInvalidInt) + return + } + count = n + args = args[3:] + + if len(args) == 1 { + var c string + c, args = args[0], args[1:] + consumer = &c + } + } + if len(args) != 0 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + g, err := db.streamGroup(key, group) + if err != nil { + c.WriteError(err.Error()) + return + } + if g == nil { + c.WriteError(errReadgroup(key, group).Error()) + return + } + + if summary { + writeXpendingSummary(c, *g) + return + } + writeXpending(m.effectiveNow(), c, *g, start, end, count, consumer) + }) +} + +func writeXpendingSummary(c *server.Peer, g streamGroup) { + if len(g.pending) == 0 { + c.WriteLen(4) + c.WriteInt(0) + c.WriteNull() + c.WriteNull() + c.WriteLen(-1) + return + } + + // format: + // - number of pending + // - smallest ID + // - highest ID + // - all consumers with > 0 pending items + c.WriteLen(4) + c.WriteInt(len(g.pending)) + c.WriteBulk(g.pending[0].id) + c.WriteBulk(g.pending[len(g.pending)-1].id) + cons := map[string]int{} + for id := range g.consumers { + cnt := g.pendingCount(id) + if cnt > 0 { + cons[id] = cnt + } + } + c.WriteLen(len(cons)) + var ids []string + for id := range cons { + ids = append(ids, id) + } + sort.Strings(ids) // be predicatable + for _, id := range ids { + c.WriteLen(2) + c.WriteBulk(id) + c.WriteBulk(strconv.Itoa(cons[id])) + } +} + +func writeXpending( + now time.Time, + c *server.Peer, + g streamGroup, + start, + end string, + count int, + consumer *string, +) { + if len(g.pending) == 0 || count < 0 { + c.WriteLen(-1) + return + } + + // format, list of: + // - message ID + // - consumer + // - milliseconds since delivery + // - delivery count + type entry struct { + id string + consumer string + millis int + count int + } + var res []entry + for _, p := range g.pending { + if len(res) >= count { + break + } + if consumer != nil && p.consumer != 
*consumer { + continue + } + if streamCmp(p.id, start) < 0 { + continue + } + if streamCmp(p.id, end) > 0 { + continue + } + res = append(res, entry{ + id: p.id, + consumer: p.consumer, + millis: int(now.Sub(p.lastDelivery).Milliseconds()), + count: p.deliveryCount, + }) + } + c.WriteLen(len(res)) + for _, e := range res { + c.WriteLen(4) + c.WriteBulk(e.id) + c.WriteBulk(e.consumer) + c.WriteInt(e.millis) + c.WriteInt(e.count) + } +} + +// XTRIM +func (m *Miniredis) cmdXtrim(c *server.Peer, cmd string, args []string) { + if len(args) < 3 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + var opts struct { + stream string + strategy string + maxLen int // for MAXLEN + threshold string // for MINID + withLimit bool // "LIMIT" + withExact bool // "=" + withNearly bool // "~" + } + + opts.stream, opts.strategy, args = args[0], strings.ToUpper(args[1]), args[2:] + + if opts.strategy != "MAXLEN" && opts.strategy != "MINID" { + setDirty(c) + c.WriteError(msgXtrimInvalidStrategy) + return + } + + // Ignore nearly exact trimming parameters. + switch args[0] { + case "=": + opts.withExact = true + args = args[1:] + case "~": + opts.withNearly = true + args = args[1:] + } + + switch opts.strategy { + case "MAXLEN": + maxLen, err := strconv.Atoi(args[0]) + if err != nil { + setDirty(c) + c.WriteError(msgXtrimInvalidMaxLen) + return + } + opts.maxLen = maxLen + case "MINID": + opts.threshold = args[0] + } + args = args[1:] + + if len(args) == 2 && strings.ToUpper(args[0]) == "LIMIT" { + // Ignore LIMIT. + opts.withLimit = true + if _, err := strconv.Atoi(args[1]); err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + + args = args[2:] + } + + if len(args) != 0 { + setDirty(c) + c.WriteError(fmt.Sprintf("ERR incorrect argument %s", args[0])) + return + } + + if opts.withLimit && !opts.withNearly { + setDirty(c) + c.WriteError(fmt.Sprintf(msgXtrimInvalidLimit)) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + s, err := db.stream(opts.stream) + if err != nil { + setDirty(c) + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteInt(0) + return + } + + switch opts.strategy { + case "MAXLEN": + entriesBefore := len(s.entries) + s.trim(opts.maxLen) + c.WriteInt(entriesBefore - len(s.entries)) + case "MINID": + var delete []string + for _, entry := range s.entries { + if entry.ID < opts.threshold { + delete = append(delete, entry.ID) + } else { + break + } + } + s.delete(delete) + c.WriteInt(len(delete)) + } + }) +} + +func parseBlock(cmd string, args []string, block *bool, timeout *time.Duration) error { + if len(args) < 2 { + return errors.New(errWrongNumber(cmd)) + } + (*block) = true + ms, err := strconv.Atoi(args[1]) + if err != nil { + return errors.New(msgInvalidInt) + } + if ms < 0 { + return errors.New("ERR timeout is negative") + } + (*timeout) = time.Millisecond * time.Duration(ms) + return nil +} diff --git a/vendor/github.com/alicebob/miniredis/cmd_string.go b/vendor/github.com/alicebob/miniredis/v2/cmd_string.go similarity index 84% rename from vendor/github.com/alicebob/miniredis/cmd_string.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_string.go index 930da992..f6b5038a 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_string.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_string.go @@ -3,11 +3,12 @@ package miniredis import ( + "math/big" "strconv" "strings" "time" - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) // commandsString handles 
all string value operations. @@ -22,6 +23,7 @@ func commandsString(m *Miniredis) { m.srv.Register("GET", m.cmdGet) m.srv.Register("GETRANGE", m.cmdGetrange) m.srv.Register("GETSET", m.cmdGetset) + m.srv.Register("GETDEL", m.cmdGetdel) m.srv.Register("INCRBYFLOAT", m.cmdIncrbyfloat) m.srv.Register("INCRBY", m.cmdIncrby) m.srv.Register("INCR", m.cmdIncr) @@ -47,49 +49,77 @@ func (m *Miniredis) cmdSet(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } - var ( - nx = false // set iff not exists - xx = false // set iff exists - ttl time.Duration - ) + var opts struct { + key string + value string + nx bool // set iff not exists + xx bool // set iff exists + keepttl bool // set keepttl + ttlSet bool + ttl time.Duration + get bool + } - key, value, args := args[0], args[1], args[2:] + opts.key, opts.value, args = args[0], args[1], args[2:] for len(args) > 0 { timeUnit := time.Second - switch strings.ToUpper(args[0]) { + switch arg := strings.ToUpper(args[0]); arg { case "NX": - nx = true + opts.nx = true args = args[1:] continue case "XX": - xx = true + opts.xx = true + args = args[1:] + continue + case "KEEPTTL": + opts.keepttl = true args = args[1:] continue - case "PX": + case "PX", "PXAT": timeUnit = time.Millisecond fallthrough - case "EX": + case "EX", "EXAT": if len(args) < 2 { setDirty(c) c.WriteError(msgInvalidInt) return } + if opts.ttlSet { + // multiple ex/exat/px/pxat options set + setDirty(c) + c.WriteError(msgSyntaxError) + return + } expire, err := strconv.Atoi(args[1]) if err != nil { setDirty(c) c.WriteError(msgInvalidInt) return } - ttl = time.Duration(expire) * timeUnit - if ttl <= 0 { + if expire <= 0 { setDirty(c) c.WriteError(msgInvalidSETime) return } + if arg == "PXAT" || arg == "EXAT" { + opts.ttl = m.at(expire, timeUnit) + } else { + opts.ttl = time.Duration(expire) * timeUnit + } + opts.ttlSet = true + args = args[2:] continue + case "GET": + opts.get = true + args = args[1:] + continue default: setDirty(c) c.WriteError(msgSyntaxError) @@ -100,24 +130,45 @@ func (m *Miniredis) cmdSet(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if nx { - if db.exists(key) { + if opts.nx { + if db.exists(opts.key) { c.WriteNull() return } } - if xx { - if !db.exists(key) { + if opts.xx { + if !db.exists(opts.key) { c.WriteNull() return } } - - db.del(key, true) // be sure to remove existing values of other type keys. + if opts.keepttl { + if val, ok := db.ttl[opts.key]; ok { + opts.ttl = val + } + } + if opts.get { + if t, ok := db.keys[opts.key]; ok && t != "string" { + c.WriteError(msgWrongType) + return + } + } + old, existed := db.stringKeys[opts.key] + db.del(opts.key, true) // be sure to remove existing values of other type keys. 
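The rewritten cmdSet above adds the newer SET options: KEEPTTL preserves an existing expiry, EXAT/PXAT accept absolute timestamps, GET returns the previous value, and a second expiry option in the same call is rejected as a syntax error. A hedged sketch of that behaviour (same illustrative setup as earlier, redigo assumed):

```go
// package and imports as in the ZRANDMEMBER sketch above
func TestSetOptions(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c, err := redis.Dial("tcp", s.Addr())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// SET with EX attaches a TTL.
	if _, err := c.Do("SET", "session", "v1", "EX", 60); err != nil {
		t.Fatal(err)
	}

	// KEEPTTL overwrites the value but keeps the expiry.
	if _, err := c.Do("SET", "session", "v2", "KEEPTTL"); err != nil {
		t.Fatal(err)
	}
	if ttl := s.TTL("session"); ttl == 0 {
		t.Fatal("want the 60s TTL to survive KEEPTTL")
	}

	// GET returns the old value while storing the new one.
	old, err := redis.String(c.Do("SET", "session", "v3", "GET"))
	if err != nil {
		t.Fatal(err)
	}
	if old != "v2" {
		t.Fatalf("want previous value v2, got %q", old)
	}
}
```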
// a vanilla SET clears the expire - db.stringSet(key, value) - if ttl != 0 { - db.ttl[key] = ttl + if opts.ttl >= 0 { // EXAT/PXAT can expire right away + db.stringSet(opts.key, opts.value) + } + if opts.ttl != 0 { + db.ttl[opts.key] = opts.ttl + } + if opts.get { + if !existed { + c.WriteNull() + } else { + c.WriteBulk(old) + } + return } c.WriteOK() }) @@ -133,6 +184,9 @@ func (m *Miniredis) cmdSetex(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] ttl, err := strconv.Atoi(args[1]) @@ -168,6 +222,9 @@ func (m *Miniredis) cmdPsetex(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] ttl, err := strconv.Atoi(args[1]) @@ -203,6 +260,9 @@ func (m *Miniredis) cmdSetnx(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, value := args[0], args[1] @@ -229,6 +289,9 @@ func (m *Miniredis) cmdMset(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } if len(args)%2 != 0 { setDirty(c) @@ -261,6 +324,9 @@ func (m *Miniredis) cmdMsetnx(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } if len(args)%2 != 0 { setDirty(c) @@ -306,6 +372,9 @@ func (m *Miniredis) cmdGet(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -335,6 +404,9 @@ func (m *Miniredis) cmdGetset(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, value := args[0], args[1] @@ -359,6 +431,41 @@ func (m *Miniredis) cmdGetset(c *server.Peer, cmd string, args []string) { }) } +// GETDEL +func (m *Miniredis) cmdGetdel(c *server.Peer, cmd string, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + key := args[0] + + if !db.exists(key) { + c.WriteNull() + return + } + + if db.t(key) != "string" { + c.WriteError(msgWrongType) + return + } + + v := db.stringGet(key) + db.del(key, true) + c.WriteBulk(v) + }) +} + // MGET func (m *Miniredis) cmdMget(c *server.Peer, cmd string, args []string) { if len(args) < 1 { @@ -369,6 +476,9 @@ func (m *Miniredis) cmdMget(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -400,6 +510,9 @@ func (m *Miniredis) cmdIncr(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -429,6 +542,9 @@ func (m *Miniredis) cmdIncrby(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] delta, err := strconv.Atoi(args[1]) @@ -466,9 +582,12 @@ func (m *Miniredis) cmdIncrbyfloat(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] - delta, err := strconv.ParseFloat(args[1], 64) + delta, _, err := big.ParseFloat(args[1], 10, 128, 0) if err != nil { setDirty(c) 
c.WriteError(msgInvalidFloat) @@ -489,7 +608,7 @@ func (m *Miniredis) cmdIncrbyfloat(c *server.Peer, cmd string, args []string) { return } // Don't touch TTL - c.WriteBulk(formatFloat(v)) + c.WriteBulk(formatBig(v)) }) } @@ -503,6 +622,9 @@ func (m *Miniredis) cmdDecr(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -532,6 +654,9 @@ func (m *Miniredis) cmdDecrby(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] delta, err := strconv.Atoi(args[1]) @@ -569,6 +694,9 @@ func (m *Miniredis) cmdStrlen(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] @@ -594,6 +722,9 @@ func (m *Miniredis) cmdAppend(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key, value := args[0], args[1] @@ -622,6 +753,9 @@ func (m *Miniredis) cmdGetrange(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] start, err := strconv.Atoi(args[1]) @@ -660,6 +794,9 @@ func (m *Miniredis) cmdSetrange(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] pos, err := strconv.Atoi(args[1]) @@ -705,6 +842,9 @@ func (m *Miniredis) cmdBitcount(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } var ( useRange = false @@ -767,6 +907,9 @@ func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } var ( op = strings.ToUpper(args[0]) @@ -844,6 +987,9 @@ func (m *Miniredis) cmdBitpos(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] bit, err := strconv.Atoi(args[1]) @@ -878,24 +1024,42 @@ func (m *Miniredis) cmdBitpos(c *server.Peer, cmd string, args []string) { if t, ok := db.keys[key]; ok && t != "string" { c.WriteError(msgWrongType) return + } else if !ok { + // non-existing key behaves differently + if bit == 0 { + c.WriteInt(0) + } else { + c.WriteInt(-1) + } + return } value := db.stringKeys[key] - if start != 0 { - if start > len(value) { - start = len(value) + + if start < 0 { + start += len(value) + if start < 0 { + start = 0 } } + if start > len(value) { + start = len(value) + } + if withEnd { - end++ // redis end semantics. if end < 0 { - end = len(value) + end + end += len(value) } + if end < 0 { + end = 0 + } + end++ // +1 for redis end semantics if end > len(value) { end = len(value) } } else { end = len(value) } + if start != 0 || withEnd { if end < start { value = "" @@ -909,7 +1073,7 @@ func (m *Miniredis) cmdBitpos(c *server.Peer, cmd string, args []string) { } // Special case when looking for 0, but not when start and end are // given. 
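GETDEL, also introduced above, returns the string value and removes the key in one step; a missing key yields a nil reply and a key of another type a WRONGTYPE error. A sketch under the same assumptions as before:

```go
// package and imports as in the ZRANDMEMBER sketch above
func TestGetDel(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c, err := redis.Dial("tcp", s.Addr())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// Seed via the direct API.
	if err := s.Set("token", "abc123"); err != nil {
		t.Fatal(err)
	}

	v, err := redis.String(c.Do("GETDEL", "token"))
	if err != nil {
		t.Fatal(err)
	}
	if v != "abc123" {
		t.Fatalf("want abc123, got %q", v)
	}

	// The key is gone afterwards, so a second GETDEL returns a nil reply.
	if s.Exists("token") {
		t.Fatal("want token to be deleted")
	}
	if reply, _ := c.Do("GETDEL", "token"); reply != nil {
		t.Fatalf("want nil reply for a missing key, got %v", reply)
	}
}
```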
- if bit == 0 && pos == -1 && !withEnd { + if bit == 0 && pos == -1 && !withEnd && len(value) > 0 { pos = start*8 + len(value)*8 } c.WriteInt(pos) @@ -926,6 +1090,9 @@ func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] bit, err := strconv.Atoi(args[1]) @@ -969,6 +1136,9 @@ func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } key := args[0] bit, err := strconv.Atoi(args[1]) diff --git a/vendor/github.com/alicebob/miniredis/cmd_transactions.go b/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go similarity index 83% rename from vendor/github.com/alicebob/miniredis/cmd_transactions.go rename to vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go index 64912cf5..9cbcaf3b 100644 --- a/vendor/github.com/alicebob/miniredis/cmd_transactions.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go @@ -3,7 +3,7 @@ package miniredis import ( - "github.com/alicebob/miniredis/server" + "github.com/alicebob/miniredis/v2/server" ) // commandsTransaction handles MULTI &c. @@ -24,9 +24,15 @@ func (m *Miniredis) cmdMulti(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } ctx := getCtx(c) - + if ctx.nested { + c.WriteError(msgNotFromScripts) + return + } if inTx(ctx) { c.WriteError("ERR MULTI calls can not be nested") return @@ -47,9 +53,15 @@ func (m *Miniredis) cmdExec(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } ctx := getCtx(c) - + if ctx.nested { + c.WriteError(msgNotFromScripts) + return + } if !inTx(ctx) { c.WriteError("ERR EXEC without MULTI") return @@ -57,6 +69,8 @@ func (m *Miniredis) cmdExec(c *server.Peer, cmd string, args []string) { if ctx.dirtyTransaction { c.WriteError("EXECABORT Transaction discarded because of previous errors.") + // a failed EXEC finishes the tx + stopTx(ctx) return } @@ -68,7 +82,7 @@ func (m *Miniredis) cmdExec(c *server.Peer, cmd string, args []string) { if m.db(t.db).keyVersion[t.key] > version { // Abort! Abort! stopTx(ctx) - c.WriteLen(0) + c.WriteLen(-1) return } } @@ -93,6 +107,9 @@ func (m *Miniredis) cmdDiscard(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } ctx := getCtx(c) if !inTx(ctx) { @@ -114,8 +131,15 @@ func (m *Miniredis) cmdWatch(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts) + return + } if inTx(ctx) { c.WriteError("ERR WATCH in MULTI") return @@ -141,6 +165,9 @@ func (m *Miniredis) cmdUnwatch(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } + if m.checkPubsub(c, cmd) { + return + } // Doesn't matter if UNWATCH is in a TX or not. Looks like a Redis bug to me. 
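In cmd_transactions.go an EXEC whose WATCHed key changed now returns a nil array (WriteLen(-1)) rather than an empty one, and a failed EXEC ends the transaction. A sketch of the optimistic-locking flow; it assumes, as miniredis's own tests do, that a direct write bumps the key version that WATCH recorded (redigo and all names remain illustrative):

```go
// package and imports as in the ZRANDMEMBER sketch above
func TestWatchAbort(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c, err := redis.Dial("tcp", s.Addr())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	if err := s.Set("counter", "1"); err != nil {
		t.Fatal(err)
	}

	// Optimistic locking: WATCH the key, then queue a write.
	if _, err := c.Do("WATCH", "counter"); err != nil {
		t.Fatal(err)
	}
	if _, err := c.Do("MULTI"); err != nil {
		t.Fatal(err)
	}
	if _, err := c.Do("SET", "counter", "2"); err != nil {
		t.Fatal(err)
	}

	// Touch the watched key behind the transaction's back.
	if err := s.Set("counter", "99"); err != nil {
		t.Fatal(err)
	}

	// The aborted EXEC now comes back as a nil reply.
	reply, err := c.Do("EXEC")
	if err != nil {
		t.Fatal(err)
	}
	if reply != nil {
		t.Fatalf("want nil reply from an aborted EXEC, got %v", reply)
	}
}
```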
unwatch(getCtx(c)) diff --git a/vendor/github.com/alicebob/miniredis/db.go b/vendor/github.com/alicebob/miniredis/v2/db.go similarity index 73% rename from vendor/github.com/alicebob/miniredis/db.go rename to vendor/github.com/alicebob/miniredis/v2/db.go index 5600afe8..5c2b1aaf 100644 --- a/vendor/github.com/alicebob/miniredis/db.go +++ b/vendor/github.com/alicebob/miniredis/v2/db.go @@ -1,11 +1,18 @@ package miniredis import ( + "errors" + "fmt" + "math/big" "sort" "strconv" "time" ) +var ( + errInvalidEntryID = errors.New("stream ID is invalid") +) + func (db *RedisDB) exists(k string) bool { _, ok := db.keys[k] return ok @@ -33,8 +40,10 @@ func (db *RedisDB) flush() { db.hashKeys = map[string]hashKey{} db.listKeys = map[string]listKey{} db.setKeys = map[string]setKey{} + db.hllKeys = map[string]*hll{} db.sortedsetKeys = map[string]sortedSet{} db.ttl = map[string]time.Duration{} + db.streamKeys = map[string]*streamKey{} } // move something to another db. Will return ok. Or not. @@ -59,6 +68,10 @@ func (db *RedisDB) move(key string, to *RedisDB) bool { to.setKeys[key] = db.setKeys[key] case "zset": to.sortedsetKeys[key] = db.sortedsetKeys[key] + case "stream": + to.streamKeys[key] = db.streamKeys[key] + case "hll": + to.hllKeys[key] = db.hllKeys[key] default: panic("unhandled key type") } @@ -83,12 +96,18 @@ func (db *RedisDB) rename(from, to string) { db.setKeys[to] = db.setKeys[from] case "zset": db.sortedsetKeys[to] = db.sortedsetKeys[from] + case "stream": + db.streamKeys[to] = db.streamKeys[from] + case "hll": + db.hllKeys[to] = db.hllKeys[from] default: panic("missing case") } db.keys[to] = db.keys[from] db.keyVersion[to]++ - db.ttl[to] = db.ttl[from] + if v, ok := db.ttl[from]; ok { + db.ttl[to] = v + } db.del(from, true) } @@ -114,6 +133,10 @@ func (db *RedisDB) del(k string, delTTL bool) { delete(db.setKeys, k) case "zset": delete(db.sortedsetKeys, k) + case "stream": + delete(db.streamKeys, k) + case "hll": + delete(db.hllKeys, k) default: panic("Unknown key type: " + t) } @@ -151,17 +174,18 @@ func (db *RedisDB) stringIncr(k string, delta int) (int, error) { } // change float key value -func (db *RedisDB) stringIncrfloat(k string, delta float64) (float64, error) { - v := 0.0 +func (db *RedisDB) stringIncrfloat(k string, delta *big.Float) (*big.Float, error) { + v := big.NewFloat(0.0) + v.SetPrec(128) if sv, ok := db.stringKeys[k]; ok { var err error - v, err = strconv.ParseFloat(sv, 64) + v, _, err = big.ParseFloat(sv, 10, 128, 0) if err != nil { - return 0, ErrFloatValueError + return nil, ErrFloatValueError } } - v += delta - db.stringSet(k, formatFloat(v)) + v.Add(v, delta) + db.stringSet(k, formatBig(v)) return v, nil } @@ -287,7 +311,7 @@ func (db *RedisDB) setIsMember(k, v string) bool { // hashFields returns all (sorted) keys ('fields') for a hash key. func (db *RedisDB) hashFields(k string) []string { v := db.hashKeys[k] - r := make([]string, 0, len(v)) + var r []string for k := range v { r = append(r, k) } @@ -295,13 +319,24 @@ func (db *RedisDB) hashFields(k string) []string { return r } +// hashValues returns all (sorted) values a hash key. 
+func (db *RedisDB) hashValues(k string) []string { + h := db.hashKeys[k] + var r []string + for _, v := range h { + r = append(r, v) + } + sort.Strings(r) + return r +} + // hashGet a value func (db *RedisDB) hashGet(key, field string) string { return db.hashKeys[key][field] } -// hashSet returns whether the key already existed -func (db *RedisDB) hashSet(k, f, v string) bool { +// hashSet returns the number of new keys +func (db *RedisDB) hashSet(k string, fv ...string) int { if t, ok := db.keys[k]; ok && t != "hash" { db.del(k, true) } @@ -309,10 +344,17 @@ func (db *RedisDB) hashSet(k, f, v string) bool { if _, ok := db.hashKeys[k]; !ok { db.hashKeys[k] = map[string]string{} } - _, ok := db.hashKeys[k][f] - db.hashKeys[k][f] = v - db.keyVersion[k]++ - return ok + new := 0 + for idx := 0; idx < len(fv)-1; idx = idx + 2 { + f, v := fv[idx], fv[idx+1] + _, ok := db.hashKeys[k][f] + db.hashKeys[k][f] = v + db.keyVersion[k]++ + if !ok { + new++ + } + } + return new } // hashIncr changes int key value @@ -333,19 +375,20 @@ func (db *RedisDB) hashIncr(key, field string, delta int) (int, error) { } // hashIncrfloat changes float key value -func (db *RedisDB) hashIncrfloat(key, field string, delta float64) (float64, error) { - v := 0.0 +func (db *RedisDB) hashIncrfloat(key, field string, delta *big.Float) (*big.Float, error) { + v := big.NewFloat(0.0) + v.SetPrec(128) if h, ok := db.hashKeys[key]; ok { if f, ok := h[field]; ok { var err error - v, err = strconv.ParseFloat(f, 64) + v, _, err = big.ParseFloat(f, 10, 128, 0) if err != nil { - return 0, ErrFloatValueError + return nil, ErrFloatValueError } } } - v += delta - db.hashSet(key, field, formatFloat(v)) + v.Add(v, delta) + db.hashSet(key, field, formatBig(v)) return v, nil } @@ -399,6 +442,14 @@ func (db *RedisDB) ssetElements(key string) ssElems { return ss.byScore(asc) } +func (db *RedisDB) ssetRandomMember(key string) string { + elems := db.ssetElements(key) + if len(elems) == 0 { + return "" + } + return elems[db.master.randIntn(len(elems))].member +} + // ssetCard is the sorted set cardinality. func (db *RedisDB) ssetCard(key string) int { ss := db.sortedsetKeys[key] @@ -534,6 +585,38 @@ func (db *RedisDB) setUnion(keys []string) (setKey, error) { return s, nil } +func (db *RedisDB) newStream(key string) (*streamKey, error) { + if s, err := db.stream(key); err != nil { + return nil, err + } else if s != nil { + return nil, fmt.Errorf("ErrAlreadyExists") + } + + db.keys[key] = "stream" + s := newStreamKey() + db.streamKeys[key] = s + db.keyVersion[key]++ + return s, nil +} + +// return existing stream, or nil. +func (db *RedisDB) stream(key string) (*streamKey, error) { + if db.exists(key) && db.t(key) != "stream" { + return nil, ErrWrongType + } + + return db.streamKeys[key], nil +} + +// return existing stream group, or nil. +func (db *RedisDB) streamGroup(key, group string) (*streamGroup, error) { + s, err := db.stream(key) + if err != nil || s == nil { + return nil, err + } + return s.groups[group], nil +} + // fastForward proceeds the current timestamp with duration, works as a time machine func (db *RedisDB) fastForward(duration time.Duration) { for _, key := range db.allKeys() { @@ -549,3 +632,69 @@ func (db *RedisDB) checkTTL(key string) { db.del(key, true) } } + +// hllAdd adds members to a hll. 
Returns 1 if at least 1 if internal HyperLogLog was altered, otherwise 0 +func (db *RedisDB) hllAdd(k string, elems ...string) int { + s, ok := db.hllKeys[k] + if !ok { + s = newHll() + db.keys[k] = "hll" + } + hllAltered := 0 + for _, e := range elems { + if s.Add([]byte(e)) { + hllAltered = 1 + } + } + db.hllKeys[k] = s + db.keyVersion[k]++ + return hllAltered +} + +// hllCount estimates the amount of members added to hll by hllAdd. If called with several arguments, hllCount returns a sum of estimations +func (db *RedisDB) hllCount(keys []string) (int, error) { + countOverall := 0 + for _, key := range keys { + if db.exists(key) && db.t(key) != "hll" { + return 0, ErrNotValidHllValue + } + if !db.exists(key) { + continue + } + countOverall += db.hllKeys[key].Count() + } + + return countOverall, nil +} + +// hllMerge merges all the hlls provided as keys to the first key. Creates a new hll in the first key if it contains nothing +func (db *RedisDB) hllMerge(keys []string) error { + for _, key := range keys { + if db.exists(key) && db.t(key) != "hll" { + return ErrNotValidHllValue + } + } + + destKey := keys[0] + restKeys := keys[1:] + + var destHll *hll + if db.exists(destKey) { + destHll = db.hllKeys[destKey] + } else { + destHll = newHll() + } + + for _, key := range restKeys { + if !db.exists(key) { + continue + } + destHll.Merge(db.hllKeys[key]) + } + + db.hllKeys[destKey] = destHll + db.keys[destKey] = "hll" + db.keyVersion[destKey]++ + + return nil +} diff --git a/vendor/github.com/alicebob/miniredis/direct.go b/vendor/github.com/alicebob/miniredis/v2/direct.go similarity index 64% rename from vendor/github.com/alicebob/miniredis/direct.go rename to vendor/github.com/alicebob/miniredis/v2/direct.go index ca41449f..23b6703a 100644 --- a/vendor/github.com/alicebob/miniredis/direct.go +++ b/vendor/github.com/alicebob/miniredis/v2/direct.go @@ -4,16 +4,23 @@ package miniredis import ( "errors" + "math/big" "time" ) var ( // ErrKeyNotFound is returned when a key doesn't exist. ErrKeyNotFound = errors.New(msgKeyNotFound) + // ErrWrongType when a key is not the right type. ErrWrongType = errors.New(msgWrongType) + + // ErrNotValidHllValue when a key is not a valid HyperLogLog string value. 
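db.go gains hllAdd, hllCount and hllMerge as the storage layer for HyperLogLog keys (with ErrNotValidHllValue for type mismatches); the PFADD/PFCOUNT/PFMERGE handlers that call them are assumed to be registered elsewhere in this vendored copy. A hedged usage sketch under that assumption, same illustrative setup as the earlier examples:

```go
// package and imports as in the ZRANDMEMBER sketch above
func TestHyperLogLog(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	c, err := redis.Dial("tcp", s.Addr())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// PFADD feeds db.hllAdd; the reply is 1 whenever the estimate changed.
	if _, err := c.Do("PFADD", "visitors:mon", "alice", "bob"); err != nil {
		t.Fatal(err)
	}
	if _, err := c.Do("PFADD", "visitors:tue", "bob", "carol"); err != nil {
		t.Fatal(err)
	}

	// PFMERGE folds both keys into one, PFCOUNT estimates the cardinality.
	if _, err := c.Do("PFMERGE", "visitors:week", "visitors:mon", "visitors:tue"); err != nil {
		t.Fatal(err)
	}
	n, err := redis.Int(c.Do("PFCOUNT", "visitors:week"))
	if err != nil {
		t.Fatal(err)
	}
	if n == 0 {
		t.Fatal("want a non-zero cardinality estimate")
	}
}
```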
+ ErrNotValidHllValue = errors.New(msgNotValidHllValue) + // ErrIntValueError can returned by INCRBY ErrIntValueError = errors.New(msgInvalidInt) + // ErrFloatValueError can returned by INCRBYFLOAT ErrFloatValueError = errors.New(msgInvalidFloat) ) @@ -34,6 +41,7 @@ func (m *Miniredis) Keys() []string { func (db *RedisDB) Keys() []string { db.master.Lock() defer db.master.Unlock() + return db.allKeys() } @@ -41,6 +49,8 @@ func (db *RedisDB) Keys() []string { func (m *Miniredis) FlushAll() { m.Lock() defer m.Unlock() + defer m.signal.Broadcast() + m.flushAll() } @@ -59,6 +69,8 @@ func (m *Miniredis) FlushDB() { func (db *RedisDB) FlushDB() { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + db.flush() } @@ -71,6 +83,7 @@ func (m *Miniredis) Get(k string) (string, error) { func (db *RedisDB) Get(k string) (string, error) { db.master.Lock() defer db.master.Unlock() + if !db.exists(k) { return "", ErrKeyNotFound } @@ -90,6 +103,7 @@ func (m *Miniredis) Set(k, v string) error { func (db *RedisDB) Set(k, v string) error { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() if db.exists(k) && db.t(k) != "string" { return ErrWrongType @@ -108,6 +122,7 @@ func (m *Miniredis) Incr(k string, delta int) (int, error) { func (db *RedisDB) Incr(k string, delta int) (int, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() if db.exists(k) && db.t(k) != "string" { return 0, ErrWrongType @@ -116,6 +131,12 @@ func (db *RedisDB) Incr(k string, delta int) (int, error) { return db.stringIncr(k, delta) } +// IncrByFloat increments the float value of a key by the given delta. +// is an alias for Miniredis.Incrfloat +func (m *Miniredis) IncrByFloat(k string, delta float64) (float64, error) { + return m.Incrfloat(k, delta) +} + // Incrfloat changes a float string value by delta. func (m *Miniredis) Incrfloat(k string, delta float64) (float64, error) { return m.DB(m.selectedDB).Incrfloat(k, delta) @@ -125,12 +146,18 @@ func (m *Miniredis) Incrfloat(k string, delta float64) (float64, error) { func (db *RedisDB) Incrfloat(k string, delta float64) (float64, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() if db.exists(k) && db.t(k) != "string" { return 0, ErrWrongType } - return db.stringIncrfloat(k, delta) + v, err := db.stringIncrfloat(k, big.NewFloat(delta)) + if err != nil { + return 0, err + } + vf, _ := v.Float64() + return vf, nil } // List returns the list k, or an error if it's not there or something else. @@ -156,15 +183,16 @@ func (db *RedisDB) List(k string) ([]string, error) { return db.listKeys[k], nil } -// Lpush is an unshift. Returns the new length. +// Lpush prepends one value to a list. Returns the new length. func (m *Miniredis) Lpush(k, v string) (int, error) { return m.DB(m.selectedDB).Lpush(k, v) } -// Lpush is an unshift. Returns the new length. +// Lpush prepends one value to a list. Returns the new length. func (db *RedisDB) Lpush(k, v string) (int, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() if db.exists(k) && db.t(k) != "list" { return 0, ErrWrongType @@ -172,15 +200,16 @@ func (db *RedisDB) Lpush(k, v string) (int, error) { return db.listLpush(k, v), nil } -// Lpop is a shift. Returns the popped element. +// Lpop removes and returns the last element in a list. func (m *Miniredis) Lpop(k string) (string, error) { return m.DB(m.selectedDB).Lpop(k) } -// Lpop is a shift. Returns the popped element. 
+// Lpop removes and returns the last element in a list. func (db *RedisDB) Lpop(k string) (string, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() if !db.exists(k) { return "", ErrKeyNotFound @@ -191,7 +220,13 @@ func (db *RedisDB) Lpop(k string) (string, error) { return db.listLpop(k), nil } -// Push add element at the end. Is called RPUSH in redis. Returns the new length. +// RPush appends one or multiple values to a list. Returns the new length. +// An alias for Push +func (m *Miniredis) RPush(k string, v ...string) (int, error) { + return m.Push(k, v...) +} + +// Push add element at the end. Returns the new length. func (m *Miniredis) Push(k string, v ...string) (int, error) { return m.DB(m.selectedDB).Push(k, v...) } @@ -200,6 +235,7 @@ func (m *Miniredis) Push(k string, v ...string) (int, error) { func (db *RedisDB) Push(k string, v ...string) (int, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() if db.exists(k) && db.t(k) != "list" { return 0, ErrWrongType @@ -207,6 +243,11 @@ func (db *RedisDB) Push(k string, v ...string) (int, error) { return db.listPush(k, v...), nil } +// RPop is an alias for Pop +func (m *Miniredis) RPop(k string) (string, error) { + return m.Pop(k) +} + // Pop removes and returns the last element. Is called RPOP in Redis. func (m *Miniredis) Pop(k string) (string, error) { return m.DB(m.selectedDB).Pop(k) @@ -216,6 +257,7 @@ func (m *Miniredis) Pop(k string) (string, error) { func (db *RedisDB) Pop(k string) (string, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() if !db.exists(k) { return "", ErrKeyNotFound @@ -227,6 +269,12 @@ func (db *RedisDB) Pop(k string) (string, error) { return db.listPop(k), nil } +// SAdd adds keys to a set. Returns the number of new keys. +// Alias for SetAdd +func (m *Miniredis) SAdd(k string, elems ...string) (int, error) { + return m.SetAdd(k, elems...) +} + // SetAdd adds keys to a set. Returns the number of new keys. func (m *Miniredis) SetAdd(k string, elems ...string) (int, error) { return m.DB(m.selectedDB).SetAdd(k, elems...) @@ -236,13 +284,21 @@ func (m *Miniredis) SetAdd(k string, elems ...string) (int, error) { func (db *RedisDB) SetAdd(k string, elems ...string) (int, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + if db.exists(k) && db.t(k) != "set" { return 0, ErrWrongType } return db.setAdd(k, elems...), nil } -// Members gives all set keys. Sorted. +// SMembers returns all keys in a set, sorted. +// Alias for Members. +func (m *Miniredis) SMembers(k string) ([]string, error) { + return m.Members(k) +} + +// Members returns all keys in a set, sorted. func (m *Miniredis) Members(k string) ([]string, error) { return m.DB(m.selectedDB).Members(k) } @@ -251,6 +307,7 @@ func (m *Miniredis) Members(k string) ([]string, error) { func (db *RedisDB) Members(k string) ([]string, error) { db.master.Lock() defer db.master.Unlock() + if !db.exists(k) { return nil, ErrKeyNotFound } @@ -260,6 +317,12 @@ func (db *RedisDB) Members(k string) ([]string, error) { return db.setMembers(k), nil } +// SIsMember tells if value is in the set. +// Alias for IsMember +func (m *Miniredis) SIsMember(k, v string) (bool, error) { + return m.IsMember(k, v) +} + // IsMember tells if value is in the set. 
func (m *Miniredis) IsMember(k, v string) (bool, error) { return m.DB(m.selectedDB).IsMember(k, v) @@ -269,6 +332,7 @@ func (m *Miniredis) IsMember(k, v string) (bool, error) { func (db *RedisDB) IsMember(k, v string) (bool, error) { db.master.Lock() defer db.master.Unlock() + if !db.exists(k) { return false, ErrKeyNotFound } @@ -287,6 +351,7 @@ func (m *Miniredis) HKeys(k string) ([]string, error) { func (db *RedisDB) HKeys(key string) ([]string, error) { db.master.Lock() defer db.master.Unlock() + if !db.exists(key) { return nil, ErrKeyNotFound } @@ -305,6 +370,8 @@ func (m *Miniredis) Del(k string) bool { func (db *RedisDB) Del(k string) bool { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + if !db.exists(k) { return false } @@ -312,9 +379,22 @@ func (db *RedisDB) Del(k string) bool { return true } +// Unlink deletes a key and any expiration value. Returns where there was a key. +// It's exactly the same as Del() and is not async. It is here for the consistency. +func (m *Miniredis) Unlink(k string) bool { + return m.Del(k) +} + +// Unlink deletes a key and any expiration value. Returns where there was a key. +// It's exactly the same as Del() and is not async. It is here for the consistency. +func (db *RedisDB) Unlink(k string) bool { + return db.Del(k) +} + // TTL is the left over time to live. As set via EXPIRE, PEXPIRE, EXPIREAT, // PEXPIREAT. -// 0 if not set. +// Note: this direct function returns 0 if there is no TTL set, unlike redis, +// which returns -1. func (m *Miniredis) TTL(k string) time.Duration { return m.DB(m.selectedDB).TTL(k) } @@ -325,6 +405,7 @@ func (m *Miniredis) TTL(k string) time.Duration { func (db *RedisDB) TTL(k string) time.Duration { db.master.Lock() defer db.master.Unlock() + return db.ttl[k] } @@ -337,6 +418,8 @@ func (m *Miniredis) SetTTL(k string, ttl time.Duration) { func (db *RedisDB) SetTTL(k string, ttl time.Duration) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + db.ttl[k] = ttl db.keyVersion[k]++ } @@ -350,6 +433,7 @@ func (m *Miniredis) Type(k string) string { func (db *RedisDB) Type(k string) string { db.master.Lock() defer db.master.Unlock() + return db.t(k) } @@ -362,6 +446,7 @@ func (m *Miniredis) Exists(k string) bool { func (db *RedisDB) Exists(k string) bool { db.master.Lock() defer db.master.Unlock() + return db.exists(k) } @@ -378,6 +463,7 @@ func (m *Miniredis) HGet(k, f string) string { func (db *RedisDB) HGet(k, f string) string { db.master.Lock() defer db.master.Unlock() + h, ok := db.hashKeys[k] if !ok { return "" @@ -385,18 +471,20 @@ func (db *RedisDB) HGet(k, f string) string { return h[f] } -// HSet sets a hash key. +// HSet sets hash keys. // If there is another key by the same name it will be gone. -func (m *Miniredis) HSet(k, f, v string) { - m.DB(m.selectedDB).HSet(k, f, v) +func (m *Miniredis) HSet(k string, fv ...string) { + m.DB(m.selectedDB).HSet(k, fv...) } -// HSet sets a hash key. +// HSet sets hash keys. // If there is another key by the same name it will be gone. -func (db *RedisDB) HSet(k, f, v string) { +func (db *RedisDB) HSet(k string, fv ...string) { db.master.Lock() defer db.master.Unlock() - db.hashSet(k, f, v) + defer db.master.signal.Broadcast() + + db.hashSet(k, fv...) } // HDel deletes a hash key. 
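The vendored miniredis bump changes several direct (in-process) helpers that unit tests can call on the test server: HSet now takes variadic field/value pairs, and RPush/RPop/SAdd/SMembers/SIsMember are added as aliases for Push/Pop/SetAdd/Members/IsMember. A minimal sketch of a test using them; the test name and keys are hypothetical, only the miniredis calls come from the diff above:

```go
package example_test

import (
	"testing"

	"github.com/alicebob/miniredis/v2"
)

// TestMiniredisDirectHelpers is a hypothetical test name.
func TestMiniredisDirectHelpers(t *testing.T) {
	m := miniredis.RunT(t) // the server is shut down via t.Cleanup

	// HSet now accepts field/value pairs variadically.
	m.HSet("profile", "name", "alice", "lang", "go")
	if got := m.HGet("profile", "name"); got != "alice" {
		t.Fatalf("HGet = %q, want %q", got, "alice")
	}

	// RPush/RPop are thin aliases for Push/Pop.
	if _, err := m.RPush("queue", "a", "b"); err != nil {
		t.Fatal(err)
	}
	if v, _ := m.RPop("queue"); v != "b" {
		t.Fatalf("RPop = %q, want %q", v, "b")
	}

	// SAdd/SMembers alias SetAdd/Members; Members returns members sorted.
	if _, err := m.SAdd("tags", "y", "x"); err != nil {
		t.Fatal(err)
	}
	if members, _ := m.SMembers("tags"); len(members) != 2 || members[0] != "x" {
		t.Fatalf("SMembers = %v, want [x y]", members)
	}
}
```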
@@ -408,6 +496,8 @@ func (m *Miniredis) HDel(k, f string) { func (db *RedisDB) HDel(k, f string) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + db.hdel(k, f) } @@ -419,6 +509,11 @@ func (db *RedisDB) hdel(k, f string) { db.keyVersion[k]++ } +// HIncrBy increases the integer value of a hash field by delta (int). +func (m *Miniredis) HIncrBy(k, f string, delta int) (int, error) { + return m.HIncr(k, f, delta) +} + // HIncr increases a key/field by delta (int). func (m *Miniredis) HIncr(k, f string, delta int) (int, error) { return m.DB(m.selectedDB).HIncr(k, f, delta) @@ -428,9 +523,16 @@ func (m *Miniredis) HIncr(k, f string, delta int) (int, error) { func (db *RedisDB) HIncr(k, f string, delta int) (int, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + return db.hashIncr(k, f, delta) } +// HIncrByFloat increases a key/field by delta (float). +func (m *Miniredis) HIncrByFloat(k, f string, delta float64) (float64, error) { + return m.HIncrfloat(k, f, delta) +} + // HIncrfloat increases a key/field by delta (float). func (m *Miniredis) HIncrfloat(k, f string, delta float64) (float64, error) { return m.DB(m.selectedDB).HIncrfloat(k, f, delta) @@ -440,7 +542,14 @@ func (m *Miniredis) HIncrfloat(k, f string, delta float64) (float64, error) { func (db *RedisDB) HIncrfloat(k, f string, delta float64) (float64, error) { db.master.Lock() defer db.master.Unlock() - return db.hashIncrfloat(k, f, delta) + defer db.master.signal.Broadcast() + + v, err := db.hashIncrfloat(k, f, big.NewFloat(delta)) + if err != nil { + return 0, err + } + vf, _ := v.Float64() + return vf, nil } // SRem removes fields from a set. Returns number of deleted fields. @@ -452,6 +561,8 @@ func (m *Miniredis) SRem(k string, fields ...string) (int, error) { func (db *RedisDB) SRem(k string, fields ...string) (int, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + if !db.exists(k) { return 0, ErrKeyNotFound } @@ -470,21 +581,24 @@ func (m *Miniredis) ZAdd(k string, score float64, member string) (bool, error) { func (db *RedisDB) ZAdd(k string, score float64, member string) (bool, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + if db.exists(k) && db.t(k) != "zset" { return false, ErrWrongType } return db.ssetAdd(k, score, member), nil } -// ZMembers returns all members by score +// ZMembers returns all members of a sorted set by score func (m *Miniredis) ZMembers(k string) ([]string, error) { return m.DB(m.selectedDB).ZMembers(k) } -// ZMembers returns all members by score +// ZMembers returns all members of a sorted set by score func (db *RedisDB) ZMembers(k string) ([]string, error) { db.master.Lock() defer db.master.Unlock() + if !db.exists(k) { return nil, ErrKeyNotFound } @@ -503,6 +617,7 @@ func (m *Miniredis) SortedSet(k string) (map[string]float64, error) { func (db *RedisDB) SortedSet(k string) (map[string]float64, error) { db.master.Lock() defer db.master.Unlock() + if !db.exists(k) { return nil, ErrKeyNotFound } @@ -521,6 +636,8 @@ func (m *Miniredis) ZRem(k, member string) (bool, error) { func (db *RedisDB) ZRem(k, member string) (bool, error) { db.master.Lock() defer db.master.Unlock() + defer db.master.signal.Broadcast() + if !db.exists(k) { return false, ErrKeyNotFound } @@ -539,6 +656,7 @@ func (m *Miniredis) ZScore(k, member string) (float64, error) { func (db *RedisDB) ZScore(k, member string) (float64, error) { db.master.Lock() defer db.master.Unlock() + if 
!db.exists(k) { return 0, ErrKeyNotFound } @@ -547,3 +665,131 @@ func (db *RedisDB) ZScore(k, member string) (float64, error) { } return db.ssetScore(k, member), nil } + +// XAdd adds an entry to a stream. `id` can be left empty or be '*'. +// If a value is given normal XADD rules apply. Values should be an even +// length. +func (m *Miniredis) XAdd(k string, id string, values []string) (string, error) { + return m.DB(m.selectedDB).XAdd(k, id, values) +} + +// XAdd adds an entry to a stream. `id` can be left empty or be '*'. +// If a value is given normal XADD rules apply. Values should be an even +// length. +func (db *RedisDB) XAdd(k string, id string, values []string) (string, error) { + db.master.Lock() + defer db.master.Unlock() + defer db.master.signal.Broadcast() + + s, err := db.stream(k) + if err != nil { + return "", err + } + if s == nil { + s, _ = db.newStream(k) + } + + return s.add(id, values, db.master.effectiveNow()) +} + +// Stream returns a slice of stream entries. Oldest first. +func (m *Miniredis) Stream(k string) ([]StreamEntry, error) { + return m.DB(m.selectedDB).Stream(k) +} + +// Stream returns a slice of stream entries. Oldest first. +func (db *RedisDB) Stream(key string) ([]StreamEntry, error) { + db.master.Lock() + defer db.master.Unlock() + + s, err := db.stream(key) + if err != nil { + return nil, err + } + if s == nil { + return nil, nil + } + return s.entries, nil +} + +// Publish a message to subscribers. Returns the number of receivers. +func (m *Miniredis) Publish(channel, message string) int { + m.Lock() + defer m.Unlock() + + return m.publish(channel, message) +} + +// PubSubChannels is "PUBSUB CHANNELS ". An empty pattern is fine +// (meaning all channels). +// Returned channels will be ordered alphabetically. +func (m *Miniredis) PubSubChannels(pattern string) []string { + m.Lock() + defer m.Unlock() + + return activeChannels(m.allSubscribers(), pattern) +} + +// PubSubNumSub is "PUBSUB NUMSUB [channels]". It returns all channels with their +// subscriber count. +func (m *Miniredis) PubSubNumSub(channels ...string) map[string]int { + m.Lock() + defer m.Unlock() + + subs := m.allSubscribers() + res := map[string]int{} + for _, channel := range channels { + res[channel] = countSubs(subs, channel) + } + return res +} + +// PubSubNumPat is "PUBSUB NUMPAT" +func (m *Miniredis) PubSubNumPat() int { + m.Lock() + defer m.Unlock() + + return countPsubs(m.allSubscribers()) +} + +// PfAdd adds keys to a hll. Returns the flag which equals to 1 if the inner hll value has been changed. +func (m *Miniredis) PfAdd(k string, elems ...string) (int, error) { + return m.DB(m.selectedDB).HllAdd(k, elems...) +} + +// HllAdd adds keys to a hll. Returns the flag which equals to true if the inner hll value has been changed. +func (db *RedisDB) HllAdd(k string, elems ...string) (int, error) { + db.master.Lock() + defer db.master.Unlock() + + if db.exists(k) && db.t(k) != "hll" { + return 0, ErrWrongType + } + return db.hllAdd(k, elems...), nil +} + +// PfCount returns an estimation of the amount of elements previously added to a hll. +func (m *Miniredis) PfCount(keys ...string) (int, error) { + return m.DB(m.selectedDB).HllCount(keys...) +} + +// HllCount returns an estimation of the amount of elements previously added to a hll. +func (db *RedisDB) HllCount(keys ...string) (int, error) { + db.master.Lock() + defer db.master.Unlock() + + return db.hllCount(keys) +} + +// PfMerge merges all the input hlls into a hll under destKey key. 
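The direct stream helpers above (XAdd, Stream) let a test append entries and read them back without going through a client. A small sketch under the same assumptions; the test name and key are hypothetical:

```go
package example_test

import (
	"testing"

	"github.com/alicebob/miniredis/v2"
)

// TestStreamDirect is a hypothetical test name.
func TestStreamDirect(t *testing.T) {
	m := miniredis.RunT(t)

	// With id "*" the server assigns the entry ID, as XADD would.
	id, err := m.XAdd("events", "*", []string{"type", "login", "user", "alice"})
	if err != nil {
		t.Fatal(err)
	}

	entries, err := m.Stream("events")
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 1 || entries[0].ID != id {
		t.Fatalf("Stream = %+v, want a single entry with ID %s", entries, id)
	}
}
```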
+func (m *Miniredis) PfMerge(destKey string, sourceKeys ...string) error { + return m.DB(m.selectedDB).HllMerge(destKey, sourceKeys...) +} + +// HllMerge merges all the input hlls into a hll under destKey key. +func (db *RedisDB) HllMerge(destKey string, sourceKeys ...string) error { + db.master.Lock() + defer db.master.Unlock() + + return db.hllMerge(append([]string{destKey}, sourceKeys...)) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/geo.go b/vendor/github.com/alicebob/miniredis/v2/geo.go new file mode 100644 index 00000000..bc8e9292 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/geo.go @@ -0,0 +1,48 @@ +package miniredis + +import ( + "math" + + "github.com/alicebob/miniredis/v2/geohash" +) + +func toGeohash(long, lat float64) uint64 { + return geohash.EncodeIntWithPrecision(lat, long, 52) +} + +func fromGeohash(score uint64) (float64, float64) { + lat, long := geohash.DecodeIntWithPrecision(score, 52) + return long, lat +} + +// haversin(θ) function +func hsin(theta float64) float64 { + return math.Pow(math.Sin(theta/2), 2) +} + +// distance function returns the distance (in meters) between two points of +// a given longitude and latitude relatively accurately (using a spherical +// approximation of the Earth) through the Haversin Distance Formula for +// great arc distance on a sphere with accuracy for small distances +// +// point coordinates are supplied in degrees and converted into rad. in the func +// +// distance returned is meters +// http://en.wikipedia.org/wiki/Haversine_formula +// Source: https://gist.github.com/cdipaolo/d3f8db3848278b49db68 +func distance(lat1, lon1, lat2, lon2 float64) float64 { + // convert to radians + // must cast radius as float to multiply later + var la1, lo1, la2, lo2 float64 + la1 = lat1 * math.Pi / 180 + lo1 = lon1 * math.Pi / 180 + la2 = lat2 * math.Pi / 180 + lo2 = lon2 * math.Pi / 180 + + earth := 6372797.560856 // Earth radius in METERS, according to src/geohash_helper.c + + // calculate + h := hsin(la2-la1) + math.Cos(la1)*math.Cos(la2)*hsin(lo2-lo1) + + return 2 * earth * math.Asin(math.Sqrt(h)) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/alicebob/miniredis/v2/geohash/LICENSE similarity index 96% rename from vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE rename to vendor/github.com/alicebob/miniredis/v2/geohash/LICENSE index 91b5cef3..c0190c9a 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE +++ b/vendor/github.com/alicebob/miniredis/v2/geohash/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2016 Yasuhiro Matsumoto +Copyright (c) 2015 Michael McLoughlin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
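The new HyperLogLog support is likewise exposed through direct helpers (PfAdd, PfCount, PfMerge, shown a bit earlier in direct.go), so a test can seed and inspect HLL keys in-process. An illustrative sketch; the key names are made up, and counts from an HLL are estimates:

```go
package example_test

import (
	"testing"

	"github.com/alicebob/miniredis/v2"
)

// TestHllDirect is a hypothetical test name.
func TestHllDirect(t *testing.T) {
	m := miniredis.RunT(t)

	// PfAdd returns 1 when the underlying HLL changed, 0 otherwise.
	if changed, err := m.PfAdd("visitors:mon", "u1", "u2", "u3"); err != nil || changed != 1 {
		t.Fatalf("PfAdd = %d, %v", changed, err)
	}
	if _, err := m.PfAdd("visitors:tue", "u2", "u4"); err != nil {
		t.Fatal(err)
	}

	// Merge both days, then read the approximate cardinality.
	if err := m.PfMerge("visitors:week", "visitors:mon", "visitors:tue"); err != nil {
		t.Fatal(err)
	}
	n, err := m.PfCount("visitors:week")
	if err != nil {
		t.Fatal(err)
	}
	if n == 0 {
		t.Fatalf("PfCount = %d, want a non-zero estimate", n)
	}
}
```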
+ diff --git a/vendor/github.com/alicebob/miniredis/v2/geohash/README.md b/vendor/github.com/alicebob/miniredis/v2/geohash/README.md new file mode 100644 index 00000000..c1a12d14 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/geohash/README.md @@ -0,0 +1,2 @@ +This is a (selected) copy of github.com/mmcloughlin/geohash with the latitude +range changed from 90 to ~85, to align with the algorithm use by Redis. diff --git a/vendor/github.com/alicebob/miniredis/v2/geohash/base32.go b/vendor/github.com/alicebob/miniredis/v2/geohash/base32.go new file mode 100644 index 00000000..916b272b --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/geohash/base32.go @@ -0,0 +1,44 @@ +package geohash + +// encoding encapsulates an encoding defined by a given base32 alphabet. +type encoding struct { + encode string + decode [256]byte +} + +// newEncoding constructs a new encoding defined by the given alphabet, +// which must be a 32-byte string. +func newEncoding(encoder string) *encoding { + e := new(encoding) + e.encode = encoder + for i := 0; i < len(e.decode); i++ { + e.decode[i] = 0xff + } + for i := 0; i < len(encoder); i++ { + e.decode[encoder[i]] = byte(i) + } + return e +} + +// Decode string into bits of a 64-bit word. The string s may be at most 12 +// characters. +func (e *encoding) Decode(s string) uint64 { + x := uint64(0) + for i := 0; i < len(s); i++ { + x = (x << 5) | uint64(e.decode[s[i]]) + } + return x +} + +// Encode bits of 64-bit word into a string. +func (e *encoding) Encode(x uint64) string { + b := [12]byte{} + for i := 0; i < 12; i++ { + b[11-i] = e.encode[x&0x1f] + x >>= 5 + } + return string(b[:]) +} + +// Base32Encoding with the Geohash alphabet. +var base32encoding = newEncoding("0123456789bcdefghjkmnpqrstuvwxyz") diff --git a/vendor/github.com/alicebob/miniredis/v2/geohash/geohash.go b/vendor/github.com/alicebob/miniredis/v2/geohash/geohash.go new file mode 100644 index 00000000..0e0ca2b2 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/geohash/geohash.go @@ -0,0 +1,269 @@ +// Package geohash provides encoding and decoding of string and integer +// geohashes. +package geohash + +import ( + "math" +) + +const ( + ENC_LAT = 85.05112878 + ENC_LONG = 180.0 +) + +// Direction represents directions in the latitute/longitude space. +type Direction int + +// Cardinal and intercardinal directions +const ( + North Direction = iota + NorthEast + East + SouthEast + South + SouthWest + West + NorthWest +) + +// Encode the point (lat, lng) as a string geohash with the standard 12 +// characters of precision. +func Encode(lat, lng float64) string { + return EncodeWithPrecision(lat, lng, 12) +} + +// EncodeWithPrecision encodes the point (lat, lng) as a string geohash with +// the specified number of characters of precision (max 12). +func EncodeWithPrecision(lat, lng float64, chars uint) string { + bits := 5 * chars + inthash := EncodeIntWithPrecision(lat, lng, bits) + enc := base32encoding.Encode(inthash) + return enc[12-chars:] +} + +// encodeInt provides a Go implementation of integer geohash. This is the +// default implementation of EncodeInt, but optimized versions are provided +// for certain architectures. +func EncodeInt(lat, lng float64) uint64 { + latInt := encodeRange(lat, ENC_LAT) + lngInt := encodeRange(lng, ENC_LONG) + return interleave(latInt, lngInt) +} + +// EncodeIntWithPrecision encodes the point (lat, lng) to an integer with the +// specified number of bits. 
+func EncodeIntWithPrecision(lat, lng float64, bits uint) uint64 { + hash := EncodeInt(lat, lng) + return hash >> (64 - bits) +} + +// Box represents a rectangle in latitude/longitude space. +type Box struct { + MinLat float64 + MaxLat float64 + MinLng float64 + MaxLng float64 +} + +// Center returns the center of the box. +func (b Box) Center() (lat, lng float64) { + lat = (b.MinLat + b.MaxLat) / 2.0 + lng = (b.MinLng + b.MaxLng) / 2.0 + return +} + +// Contains decides whether (lat, lng) is contained in the box. The +// containment test is inclusive of the edges and corners. +func (b Box) Contains(lat, lng float64) bool { + return (b.MinLat <= lat && lat <= b.MaxLat && + b.MinLng <= lng && lng <= b.MaxLng) +} + +// errorWithPrecision returns the error range in latitude and longitude for in +// integer geohash with bits of precision. +func errorWithPrecision(bits uint) (latErr, lngErr float64) { + b := int(bits) + latBits := b / 2 + lngBits := b - latBits + latErr = math.Ldexp(180.0, -latBits) + lngErr = math.Ldexp(360.0, -lngBits) + return +} + +// BoundingBox returns the region encoded by the given string geohash. +func BoundingBox(hash string) Box { + bits := uint(5 * len(hash)) + inthash := base32encoding.Decode(hash) + return BoundingBoxIntWithPrecision(inthash, bits) +} + +// BoundingBoxIntWithPrecision returns the region encoded by the integer +// geohash with the specified precision. +func BoundingBoxIntWithPrecision(hash uint64, bits uint) Box { + fullHash := hash << (64 - bits) + latInt, lngInt := deinterleave(fullHash) + lat := decodeRange(latInt, ENC_LAT) + lng := decodeRange(lngInt, ENC_LONG) + latErr, lngErr := errorWithPrecision(bits) + return Box{ + MinLat: lat, + MaxLat: lat + latErr, + MinLng: lng, + MaxLng: lng + lngErr, + } +} + +// BoundingBoxInt returns the region encoded by the given 64-bit integer +// geohash. +func BoundingBoxInt(hash uint64) Box { + return BoundingBoxIntWithPrecision(hash, 64) +} + +// DecodeCenter decodes the string geohash to the central point of the bounding box. +func DecodeCenter(hash string) (lat, lng float64) { + box := BoundingBox(hash) + return box.Center() +} + +// DecodeIntWithPrecision decodes the provided integer geohash with bits of +// precision to a (lat, lng) point. +func DecodeIntWithPrecision(hash uint64, bits uint) (lat, lng float64) { + box := BoundingBoxIntWithPrecision(hash, bits) + return box.Center() +} + +// DecodeInt decodes the provided 64-bit integer geohash to a (lat, lng) point. +func DecodeInt(hash uint64) (lat, lng float64) { + return DecodeIntWithPrecision(hash, 64) +} + +// Neighbors returns a slice of geohash strings that correspond to the provided +// geohash's neighbors. 
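As an aside, the geo helpers store positions as 52-bit interleaved geohash scores (toGeohash/fromGeohash above). The vendored geohash package can round-trip a coordinate at that precision; the coordinates below are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/alicebob/miniredis/v2/geohash"
)

func main() {
	// Encode at 52 bits, the precision the geo helpers above use for scores.
	score := geohash.EncodeIntWithPrecision(52.37, 4.89, 52)

	// Decoding returns the centre of the bounding box for that score.
	lat, lng := geohash.DecodeIntWithPrecision(score, 52)
	fmt.Printf("score=%d lat=%.5f lng=%.5f\n", score, lat, lng)
}
```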
+func Neighbors(hash string) []string { + box := BoundingBox(hash) + lat, lng := box.Center() + latDelta := box.MaxLat - box.MinLat + lngDelta := box.MaxLng - box.MinLng + precision := uint(len(hash)) + return []string{ + // N + EncodeWithPrecision(lat+latDelta, lng, precision), + // NE, + EncodeWithPrecision(lat+latDelta, lng+lngDelta, precision), + // E, + EncodeWithPrecision(lat, lng+lngDelta, precision), + // SE, + EncodeWithPrecision(lat-latDelta, lng+lngDelta, precision), + // S, + EncodeWithPrecision(lat-latDelta, lng, precision), + // SW, + EncodeWithPrecision(lat-latDelta, lng-lngDelta, precision), + // W, + EncodeWithPrecision(lat, lng-lngDelta, precision), + // NW + EncodeWithPrecision(lat+latDelta, lng-lngDelta, precision), + } +} + +// NeighborsInt returns a slice of uint64s that correspond to the provided hash's +// neighbors at 64-bit precision. +func NeighborsInt(hash uint64) []uint64 { + return NeighborsIntWithPrecision(hash, 64) +} + +// NeighborsIntWithPrecision returns a slice of uint64s that correspond to the +// provided hash's neighbors at the given precision. +func NeighborsIntWithPrecision(hash uint64, bits uint) []uint64 { + box := BoundingBoxIntWithPrecision(hash, bits) + lat, lng := box.Center() + latDelta := box.MaxLat - box.MinLat + lngDelta := box.MaxLng - box.MinLng + return []uint64{ + // N + EncodeIntWithPrecision(lat+latDelta, lng, bits), + // NE, + EncodeIntWithPrecision(lat+latDelta, lng+lngDelta, bits), + // E, + EncodeIntWithPrecision(lat, lng+lngDelta, bits), + // SE, + EncodeIntWithPrecision(lat-latDelta, lng+lngDelta, bits), + // S, + EncodeIntWithPrecision(lat-latDelta, lng, bits), + // SW, + EncodeIntWithPrecision(lat-latDelta, lng-lngDelta, bits), + // W, + EncodeIntWithPrecision(lat, lng-lngDelta, bits), + // NW + EncodeIntWithPrecision(lat+latDelta, lng-lngDelta, bits), + } +} + +// Neighbor returns a geohash string that corresponds to the provided +// geohash's neighbor in the provided direction +func Neighbor(hash string, direction Direction) string { + return Neighbors(hash)[direction] +} + +// NeighborInt returns a uint64 that corresponds to the provided hash's +// neighbor in the provided direction at 64-bit precision. +func NeighborInt(hash uint64, direction Direction) uint64 { + return NeighborsIntWithPrecision(hash, 64)[direction] +} + +// NeighborIntWithPrecision returns a uint64s that corresponds to the +// provided hash's neighbor in the provided direction at the given precision. +func NeighborIntWithPrecision(hash uint64, bits uint, direction Direction) uint64 { + return NeighborsIntWithPrecision(hash, bits)[direction] +} + +// precalculated for performance +var exp232 = math.Exp2(32) + +// Encode the position of x within the range -r to +r as a 32-bit integer. +func encodeRange(x, r float64) uint32 { + p := (x + r) / (2 * r) + return uint32(p * exp232) +} + +// Decode the 32-bit range encoding X back to a value in the range -r to +r. +func decodeRange(X uint32, r float64) float64 { + p := float64(X) / exp232 + x := 2*r*p - r + return x +} + +// Spread out the 32 bits of x into 64 bits, where the bits of x occupy even +// bit positions. +func spread(x uint32) uint64 { + X := uint64(x) + X = (X | (X << 16)) & 0x0000ffff0000ffff + X = (X | (X << 8)) & 0x00ff00ff00ff00ff + X = (X | (X << 4)) & 0x0f0f0f0f0f0f0f0f + X = (X | (X << 2)) & 0x3333333333333333 + X = (X | (X << 1)) & 0x5555555555555555 + return X +} + +// Interleave the bits of x and y. In the result, x and y occupy even and odd +// bitlevels, respectively. 
+func interleave(x, y uint32) uint64 { + return spread(x) | (spread(y) << 1) +} + +// Squash the even bitlevels of X into a 32-bit word. Odd bitlevels of X are +// ignored, and may take any value. +func squash(X uint64) uint32 { + X &= 0x5555555555555555 + X = (X | (X >> 1)) & 0x3333333333333333 + X = (X | (X >> 2)) & 0x0f0f0f0f0f0f0f0f + X = (X | (X >> 4)) & 0x00ff00ff00ff00ff + X = (X | (X >> 8)) & 0x0000ffff0000ffff + X = (X | (X >> 16)) & 0x00000000ffffffff + return uint32(X) +} + +// Deinterleave the bits of X into 32-bit words containing the even and odd +// bitlevels of X, respectively. +func deinterleave(X uint64) (uint32, uint32) { + return squash(X), squash(X >> 1) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/hll.go b/vendor/github.com/alicebob/miniredis/v2/hll.go new file mode 100644 index 00000000..2f55fac9 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/hll.go @@ -0,0 +1,36 @@ +package miniredis + +import ( + "github.com/alicebob/miniredis/v2/hyperloglog" +) + +type hll struct { + inner *hyperloglog.Sketch +} + +func newHll() *hll { + return &hll{ + inner: hyperloglog.New14(), + } +} + +// Add returns true if cardinality has been changed, or false otherwise. +func (h *hll) Add(item []byte) bool { + return h.inner.Insert(item) +} + +// Count returns the estimation of a set cardinality. +func (h *hll) Count() int { + return int(h.inner.Estimate()) +} + +// Merge merges the other hll into original one (not making a copy but doing this in place). +func (h *hll) Merge(other *hll) { + _ = h.inner.Merge(other.inner) +} + +// Bytes returns raw-bytes representation of hll data structure. +func (h *hll) Bytes() []byte { + dataBytes, _ := h.inner.MarshalBinary() + return dataBytes +} diff --git a/vendor/github.com/nxadm/tail/LICENSE b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/LICENSE similarity index 89% rename from vendor/github.com/nxadm/tail/LICENSE rename to vendor/github.com/alicebob/miniredis/v2/hyperloglog/LICENSE index 818d802a..8436fdb4 100644 --- a/vendor/github.com/nxadm/tail/LICENSE +++ b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/LICENSE @@ -1,7 +1,6 @@ -# The MIT License (MIT) +MIT License -# © Copyright 2015 Hewlett Packard Enterprise Development LP -Copyright (c) 2014 ActiveState +Copyright (c) 2017 Axiom Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,6 +8,7 @@ in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. diff --git a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/README.md b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/README.md new file mode 100644 index 00000000..0fac68df --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/README.md @@ -0,0 +1 @@ +This is a copy of github.com/axiomhq/hyperloglog. 
\ No newline at end of file diff --git a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/compressed.go b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/compressed.go new file mode 100644 index 00000000..4b908be4 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/compressed.go @@ -0,0 +1,180 @@ +package hyperloglog + +import "encoding/binary" + +// Original author of this file is github.com/clarkduvall/hyperloglog +type iterable interface { + decode(i int, last uint32) (uint32, int) + Len() int + Iter() *iterator +} + +type iterator struct { + i int + last uint32 + v iterable +} + +func (iter *iterator) Next() uint32 { + n, i := iter.v.decode(iter.i, iter.last) + iter.last = n + iter.i = i + return n +} + +func (iter *iterator) Peek() uint32 { + n, _ := iter.v.decode(iter.i, iter.last) + return n +} + +func (iter iterator) HasNext() bool { + return iter.i < iter.v.Len() +} + +type compressedList struct { + count uint32 + last uint32 + b variableLengthList +} + +func (v *compressedList) Clone() *compressedList { + if v == nil { + return nil + } + + newV := &compressedList{ + count: v.count, + last: v.last, + } + + newV.b = make(variableLengthList, len(v.b)) + copy(newV.b, v.b) + return newV +} + +func (v *compressedList) MarshalBinary() (data []byte, err error) { + // Marshal the variableLengthList + bdata, err := v.b.MarshalBinary() + if err != nil { + return nil, err + } + + // At least 4 bytes for the two fixed sized values plus the size of bdata. + data = make([]byte, 0, 4+4+len(bdata)) + + // Marshal the count and last values. + data = append(data, []byte{ + // Number of items in the list. + byte(v.count >> 24), + byte(v.count >> 16), + byte(v.count >> 8), + byte(v.count), + // The last item in the list. + byte(v.last >> 24), + byte(v.last >> 16), + byte(v.last >> 8), + byte(v.last), + }...) + + // Append the list + return append(data, bdata...), nil +} + +func (v *compressedList) UnmarshalBinary(data []byte) error { + if len(data) < 12 { + return ErrorTooShort + } + + // Set the count. + v.count, data = binary.BigEndian.Uint32(data[:4]), data[4:] + + // Set the last value. + v.last, data = binary.BigEndian.Uint32(data[:4]), data[4:] + + // Set the list. + sz, data := binary.BigEndian.Uint32(data[:4]), data[4:] + v.b = make([]uint8, sz) + if uint32(len(data)) < sz { + return ErrorTooShort + } + for i := uint32(0); i < sz; i++ { + v.b[i] = data[i] + } + return nil +} + +func newCompressedList() *compressedList { + v := &compressedList{} + v.b = make(variableLengthList, 0) + return v +} + +func (v *compressedList) Len() int { + return len(v.b) +} + +func (v *compressedList) decode(i int, last uint32) (uint32, int) { + n, i := v.b.decode(i, last) + return n + last, i +} + +func (v *compressedList) Append(x uint32) { + v.count++ + v.b = v.b.Append(x - v.last) + v.last = x +} + +func (v *compressedList) Iter() *iterator { + return &iterator{0, 0, v} +} + +type variableLengthList []uint8 + +func (v variableLengthList) MarshalBinary() (data []byte, err error) { + // 4 bytes for the size of the list, and a byte for each element in the + // list. + data = make([]byte, 0, 4+v.Len()) + + // Length of the list. We only need 32 bits because the size of the set + // couldn't exceed that on 32 bit architectures. + sz := v.Len() + data = append(data, []byte{ + byte(sz >> 24), + byte(sz >> 16), + byte(sz >> 8), + byte(sz), + }...) + + // Marshal each element in the list. 
+ for i := 0; i < sz; i++ { + data = append(data, v[i]) + } + + return data, nil +} + +func (v variableLengthList) Len() int { + return len(v) +} + +func (v *variableLengthList) Iter() *iterator { + return &iterator{0, 0, v} +} + +func (v variableLengthList) decode(i int, last uint32) (uint32, int) { + var x uint32 + j := i + for ; v[j]&0x80 != 0; j++ { + x |= uint32(v[j]&0x7f) << (uint(j-i) * 7) + } + x |= uint32(v[j]) << (uint(j-i) * 7) + return x, j + 1 +} + +func (v variableLengthList) Append(x uint32) variableLengthList { + for x&0xffffff80 != 0 { + v = append(v, uint8((x&0x7f)|0x80)) + x >>= 7 + } + return append(v, uint8(x&0x7f)) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/hyperloglog.go b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/hyperloglog.go new file mode 100644 index 00000000..82663915 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/hyperloglog.go @@ -0,0 +1,424 @@ +package hyperloglog + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "sort" +) + +const ( + capacity = uint8(16) + pp = uint8(25) + mp = uint32(1) << pp + version = 1 +) + +// Sketch is a HyperLogLog data-structure for the count-distinct problem, +// approximating the number of distinct elements in a multiset. +type Sketch struct { + p uint8 + b uint8 + m uint32 + alpha float64 + tmpSet set + sparseList *compressedList + regs *registers +} + +// New returns a HyperLogLog Sketch with 2^14 registers (precision 14) +func New() *Sketch { + return New14() +} + +// New14 returns a HyperLogLog Sketch with 2^14 registers (precision 14) +func New14() *Sketch { + sk, _ := newSketch(14, true) + return sk +} + +// New16 returns a HyperLogLog Sketch with 2^16 registers (precision 16) +func New16() *Sketch { + sk, _ := newSketch(16, true) + return sk +} + +// NewNoSparse returns a HyperLogLog Sketch with 2^14 registers (precision 14) +// that will not use a sparse representation +func NewNoSparse() *Sketch { + sk, _ := newSketch(14, false) + return sk +} + +// New16NoSparse returns a HyperLogLog Sketch with 2^16 registers (precision 16) +// that will not use a sparse representation +func New16NoSparse() *Sketch { + sk, _ := newSketch(16, false) + return sk +} + +// newSketch returns a HyperLogLog Sketch with 2^precision registers +func newSketch(precision uint8, sparse bool) (*Sketch, error) { + if precision < 4 || precision > 18 { + return nil, fmt.Errorf("p has to be >= 4 and <= 18") + } + m := uint32(math.Pow(2, float64(precision))) + s := &Sketch{ + m: m, + p: precision, + alpha: alpha(float64(m)), + } + if sparse { + s.tmpSet = set{} + s.sparseList = newCompressedList() + } else { + s.regs = newRegisters(m) + } + return s, nil +} + +func (sk *Sketch) sparse() bool { + return sk.sparseList != nil +} + +// Clone returns a deep copy of sk. +func (sk *Sketch) Clone() *Sketch { + return &Sketch{ + b: sk.b, + p: sk.p, + m: sk.m, + alpha: sk.alpha, + tmpSet: sk.tmpSet.Clone(), + sparseList: sk.sparseList.Clone(), + regs: sk.regs.clone(), + } +} + +// Converts to normal if the sparse list is too large. +func (sk *Sketch) maybeToNormal() { + if uint32(len(sk.tmpSet))*100 > sk.m { + sk.mergeSparse() + if uint32(sk.sparseList.Len()) > sk.m { + sk.toNormal() + } + } +} + +// Merge takes another Sketch and combines it with Sketch h. +// If Sketch h is using the sparse Sketch, it will be converted +// to the normal Sketch. 
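For reference, the vendored hyperloglog package (a copy of github.com/axiomhq/hyperloglog, per its README) is usable on its own. A tiny illustrative sketch of its Sketch API: New14 is defined above, and Insert, Estimate and the Merge implemented just below; the values are made up:

```go
package main

import (
	"fmt"

	"github.com/alicebob/miniredis/v2/hyperloglog"
)

func main() {
	a := hyperloglog.New14() // 2^14 registers, sparse representation to start
	b := hyperloglog.New14()

	a.Insert([]byte("u1"))
	a.Insert([]byte("u2"))
	b.Insert([]byte("u2"))
	b.Insert([]byte("u3"))

	// Merge b into a; precisions must match or Merge returns an error.
	if err := a.Merge(b); err != nil {
		panic(err)
	}
	fmt.Println(a.Estimate()) // approximate distinct count (3 for this tiny input)
}
```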
+func (sk *Sketch) Merge(other *Sketch) error { + if other == nil { + // Nothing to do + return nil + } + cpOther := other.Clone() + + if sk.p != cpOther.p { + return errors.New("precisions must be equal") + } + + if sk.sparse() && other.sparse() { + for k := range other.tmpSet { + sk.tmpSet.add(k) + } + for iter := other.sparseList.Iter(); iter.HasNext(); { + sk.tmpSet.add(iter.Next()) + } + sk.maybeToNormal() + return nil + } + + if sk.sparse() { + sk.toNormal() + } + + if cpOther.sparse() { + for k := range cpOther.tmpSet { + i, r := decodeHash(k, cpOther.p, pp) + sk.insert(i, r) + } + + for iter := cpOther.sparseList.Iter(); iter.HasNext(); { + i, r := decodeHash(iter.Next(), cpOther.p, pp) + sk.insert(i, r) + } + } else { + if sk.b < cpOther.b { + sk.regs.rebase(cpOther.b - sk.b) + sk.b = cpOther.b + } else { + cpOther.regs.rebase(sk.b - cpOther.b) + cpOther.b = sk.b + } + + for i, v := range cpOther.regs.tailcuts { + v1 := v.get(0) + if v1 > sk.regs.get(uint32(i)*2) { + sk.regs.set(uint32(i)*2, v1) + } + v2 := v.get(1) + if v2 > sk.regs.get(1+uint32(i)*2) { + sk.regs.set(1+uint32(i)*2, v2) + } + } + } + return nil +} + +// Convert from sparse Sketch to dense Sketch. +func (sk *Sketch) toNormal() { + if len(sk.tmpSet) > 0 { + sk.mergeSparse() + } + + sk.regs = newRegisters(sk.m) + for iter := sk.sparseList.Iter(); iter.HasNext(); { + i, r := decodeHash(iter.Next(), sk.p, pp) + sk.insert(i, r) + } + + sk.tmpSet = nil + sk.sparseList = nil +} + +func (sk *Sketch) insert(i uint32, r uint8) bool { + changed := false + if r-sk.b >= capacity { + //overflow + db := sk.regs.min() + if db > 0 { + sk.b += db + sk.regs.rebase(db) + changed = true + } + } + if r > sk.b { + val := r - sk.b + if c1 := capacity - 1; c1 < val { + val = c1 + } + + if val > sk.regs.get(i) { + sk.regs.set(i, val) + changed = true + } + } + return changed +} + +// Insert adds element e to sketch +func (sk *Sketch) Insert(e []byte) bool { + x := hash(e) + return sk.InsertHash(x) +} + +// InsertHash adds hash x to sketch +func (sk *Sketch) InsertHash(x uint64) bool { + if sk.sparse() { + changed := sk.tmpSet.add(encodeHash(x, sk.p, pp)) + if !changed { + return false + } + if uint32(len(sk.tmpSet))*100 > sk.m/2 { + sk.mergeSparse() + if uint32(sk.sparseList.Len()) > sk.m/2 { + sk.toNormal() + } + } + return true + } else { + i, r := getPosVal(x, sk.p) + return sk.insert(uint32(i), r) + } +} + +// Estimate returns the cardinality of the Sketch +func (sk *Sketch) Estimate() uint64 { + if sk.sparse() { + sk.mergeSparse() + return uint64(linearCount(mp, mp-sk.sparseList.count)) + } + + sum, ez := sk.regs.sumAndZeros(sk.b) + m := float64(sk.m) + var est float64 + + var beta func(float64) float64 + if sk.p < 16 { + beta = beta14 + } else { + beta = beta16 + } + + if sk.b == 0 { + est = (sk.alpha * m * (m - ez) / (sum + beta(ez))) + } else { + est = (sk.alpha * m * m / sum) + } + + return uint64(est + 0.5) +} + +func (sk *Sketch) mergeSparse() { + if len(sk.tmpSet) == 0 { + return + } + + keys := make(uint64Slice, 0, len(sk.tmpSet)) + for k := range sk.tmpSet { + keys = append(keys, k) + } + sort.Sort(keys) + + newList := newCompressedList() + for iter, i := sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); { + if !iter.HasNext() { + newList.Append(keys[i]) + i++ + continue + } + + if i >= len(keys) { + newList.Append(iter.Next()) + continue + } + + x1, x2 := iter.Peek(), keys[i] + if x1 == x2 { + newList.Append(iter.Next()) + i++ + } else if x1 > x2 { + newList.Append(x2) + i++ + } else { + newList.Append(iter.Next()) + 
} + } + + sk.sparseList = newList + sk.tmpSet = set{} +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (sk *Sketch) MarshalBinary() (data []byte, err error) { + // Marshal a version marker. + data = append(data, version) + // Marshal p. + data = append(data, sk.p) + // Marshal b + data = append(data, sk.b) + + if sk.sparse() { + // It's using the sparse Sketch. + data = append(data, byte(1)) + + // Add the tmp_set + tsdata, err := sk.tmpSet.MarshalBinary() + if err != nil { + return nil, err + } + data = append(data, tsdata...) + + // Add the sparse Sketch + sdata, err := sk.sparseList.MarshalBinary() + if err != nil { + return nil, err + } + return append(data, sdata...), nil + } + + // It's using the dense Sketch. + data = append(data, byte(0)) + + // Add the dense sketch Sketch. + sz := len(sk.regs.tailcuts) + data = append(data, []byte{ + byte(sz >> 24), + byte(sz >> 16), + byte(sz >> 8), + byte(sz), + }...) + + // Marshal each element in the list. + for i := 0; i < len(sk.regs.tailcuts); i++ { + data = append(data, byte(sk.regs.tailcuts[i])) + } + + return data, nil +} + +// ErrorTooShort is an error that UnmarshalBinary try to parse too short +// binary. +var ErrorTooShort = errors.New("too short binary") + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (sk *Sketch) UnmarshalBinary(data []byte) error { + if len(data) < 8 { + return ErrorTooShort + } + + // Unmarshal version. We may need this in the future if we make + // non-compatible changes. + _ = data[0] + + // Unmarshal p. + p := data[1] + + // Unmarshal b. + sk.b = data[2] + + // Determine if we need a sparse Sketch + sparse := data[3] == byte(1) + + // Make a newSketch Sketch if the precision doesn't match or if the Sketch was used + if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) { + newh, err := newSketch(p, sparse) + if err != nil { + return err + } + newh.b = sk.b + *sk = *newh + } + + // h is now initialised with the correct p. We just need to fill the + // rest of the details out. + if sparse { + // Using the sparse Sketch. + + // Unmarshal the tmp_set. + tssz := binary.BigEndian.Uint32(data[4:8]) + sk.tmpSet = make(map[uint32]struct{}, tssz) + + // We need to unmarshal tssz values in total, and each value requires us + // to read 4 bytes. + tsLastByte := int((tssz * 4) + 8) + for i := 8; i < tsLastByte; i += 4 { + k := binary.BigEndian.Uint32(data[i : i+4]) + sk.tmpSet[k] = struct{}{} + } + + // Unmarshal the sparse Sketch. + return sk.sparseList.UnmarshalBinary(data[tsLastByte:]) + } + + // Using the dense Sketch. 
+ sk.sparseList = nil + sk.tmpSet = nil + dsz := binary.BigEndian.Uint32(data[4:8]) + sk.regs = newRegisters(dsz * 2) + data = data[8:] + + for i, val := range data { + sk.regs.tailcuts[i] = reg(val) + if uint8(sk.regs.tailcuts[i]<<4>>4) > 0 { + sk.regs.nz-- + } + if uint8(sk.regs.tailcuts[i]>>4) > 0 { + sk.regs.nz-- + } + } + + return nil +} diff --git a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/registers.go b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/registers.go new file mode 100644 index 00000000..19bb5d47 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/registers.go @@ -0,0 +1,114 @@ +package hyperloglog + +import ( + "math" +) + +type reg uint8 +type tailcuts []reg + +type registers struct { + tailcuts + nz uint32 +} + +func (r *reg) set(offset, val uint8) bool { + var isZero bool + if offset == 0 { + isZero = *r < 16 + tmpVal := uint8((*r) << 4 >> 4) + *r = reg(tmpVal | (val << 4)) + } else { + isZero = *r&0x0f == 0 + tmpVal := uint8((*r) >> 4 << 4) + *r = reg(tmpVal | val) + } + return isZero +} + +func (r *reg) get(offset uint8) uint8 { + if offset == 0 { + return uint8((*r) >> 4) + } + return uint8((*r) << 4 >> 4) +} + +func newRegisters(size uint32) *registers { + return ®isters{ + tailcuts: make(tailcuts, size/2), + nz: size, + } +} + +func (rs *registers) clone() *registers { + if rs == nil { + return nil + } + tc := make([]reg, len(rs.tailcuts)) + copy(tc, rs.tailcuts) + return ®isters{ + tailcuts: tc, + nz: rs.nz, + } +} + +func (rs *registers) rebase(delta uint8) { + nz := uint32(len(rs.tailcuts)) * 2 + for i := range rs.tailcuts { + for j := uint8(0); j < 2; j++ { + val := rs.tailcuts[i].get(j) + if val >= delta { + rs.tailcuts[i].set(j, val-delta) + if val-delta > 0 { + nz-- + } + } + } + } + rs.nz = nz +} + +func (rs *registers) set(i uint32, val uint8) { + offset, index := uint8(i)&1, i/2 + if rs.tailcuts[index].set(offset, val) { + rs.nz-- + } +} + +func (rs *registers) get(i uint32) uint8 { + offset, index := uint8(i)&1, i/2 + return rs.tailcuts[index].get(offset) +} + +func (rs *registers) sumAndZeros(base uint8) (res, ez float64) { + for _, r := range rs.tailcuts { + for j := uint8(0); j < 2; j++ { + v := float64(base + r.get(j)) + if v == 0 { + ez++ + } + res += 1.0 / math.Pow(2.0, v) + } + } + rs.nz = uint32(ez) + return res, ez +} + +func (rs *registers) min() uint8 { + if rs.nz > 0 { + return 0 + } + min := uint8(math.MaxUint8) + for _, r := range rs.tailcuts { + if r == 0 || min == 0 { + return 0 + } + if val := uint8(r << 4 >> 4); val < min { + min = val + } + if val := uint8(r >> 4); val < min { + min = val + } + } + return min +} diff --git a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/sparse.go b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/sparse.go new file mode 100644 index 00000000..8c457d32 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/sparse.go @@ -0,0 +1,92 @@ +package hyperloglog + +import ( + "math/bits" +) + +func getIndex(k uint32, p, pp uint8) uint32 { + if k&1 == 1 { + return bextr32(k, 32-p, p) + } + return bextr32(k, pp-p+1, p) +} + +// Encode a hash to be used in the sparse representation. +func encodeHash(x uint64, p, pp uint8) uint32 { + idx := uint32(bextr(x, 64-pp, pp)) + if bextr(x, 64-pp, pp-p) == 0 { + zeros := bits.LeadingZeros64((bextr(x, 0, 64-pp)<> 24), + byte(sl >> 16), + byte(sl >> 8), + byte(sl), + }...) + + // Marshal each element in the set. 
+ for k := range s { + data = append(data, []byte{ + byte(k >> 24), + byte(k >> 16), + byte(k >> 8), + byte(k), + }...) + } + + return data, nil +} + +type uint64Slice []uint32 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/alicebob/miniredis/v2/hyperloglog/utils.go b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/utils.go new file mode 100644 index 00000000..896bf7e7 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/hyperloglog/utils.go @@ -0,0 +1,69 @@ +package hyperloglog + +import ( + "github.com/alicebob/miniredis/v2/metro" + "math" + "math/bits" +) + +var hash = hashFunc + +func beta14(ez float64) float64 { + zl := math.Log(ez + 1) + return -0.370393911*ez + + 0.070471823*zl + + 0.17393686*math.Pow(zl, 2) + + 0.16339839*math.Pow(zl, 3) + + -0.09237745*math.Pow(zl, 4) + + 0.03738027*math.Pow(zl, 5) + + -0.005384159*math.Pow(zl, 6) + + 0.00042419*math.Pow(zl, 7) +} + +func beta16(ez float64) float64 { + zl := math.Log(ez + 1) + return -0.37331876643753059*ez + + -1.41704077448122989*zl + + 0.40729184796612533*math.Pow(zl, 2) + + 1.56152033906584164*math.Pow(zl, 3) + + -0.99242233534286128*math.Pow(zl, 4) + + 0.26064681399483092*math.Pow(zl, 5) + + -0.03053811369682807*math.Pow(zl, 6) + + 0.00155770210179105*math.Pow(zl, 7) +} + +func alpha(m float64) float64 { + switch m { + case 16: + return 0.673 + case 32: + return 0.697 + case 64: + return 0.709 + } + return 0.7213 / (1 + 1.079/m) +} + +func getPosVal(x uint64, p uint8) (uint64, uint8) { + i := bextr(x, 64-p, p) // {x63,...,x64-p} + w := x<
<p | 1<<(p-1) + rho := uint8(bits.LeadingZeros64(w)) + 1 + return i, rho +} + +func linearCount(m uint32, v uint32) float64 { + fm := float64(m) + return fm * math.Log(fm/float64(v)) +} + +func bextr(v uint64, start, length uint8) uint64 { + return (v >
> start) & ((1 << length) - 1) +} + +func bextr32(v uint32, start, length uint8) uint32 { + return (v >> start) & ((1 << length) - 1) +} + +func hashFunc(e []byte) uint64 { + return metro.Hash64(e, 1337) +} diff --git a/vendor/github.com/alicebob/miniredis/keys.go b/vendor/github.com/alicebob/miniredis/v2/keys.go similarity index 65% rename from vendor/github.com/alicebob/miniredis/keys.go rename to vendor/github.com/alicebob/miniredis/v2/keys.go index b7cd98fb..058e0a79 100644 --- a/vendor/github.com/alicebob/miniredis/keys.go +++ b/vendor/github.com/alicebob/miniredis/v2/keys.go @@ -1,18 +1,18 @@ package miniredis -// Translate the 'KEYS' argument ('foo*', 'f??', &c.) into a regexp. +// Translate the 'KEYS' or 'PSUBSCRIBE' argument ('foo*', 'f??', &c.) into a regexp. import ( "bytes" "regexp" ) -// patternRE compiles a KEYS argument to a regexp. Returns nil if the given +// patternRE compiles a glob to a regexp. Returns nil if the given // pattern will never match anything. // The general strategy is to sandwich all non-meta characters between \Q...\E. func patternRE(k string) *regexp.Regexp { re := bytes.Buffer{} - re.WriteString(`^\Q`) + re.WriteString(`(?s)^\Q`) for i := 0; i < len(k); i++ { p := k[i] switch p { @@ -63,3 +63,21 @@ func patternRE(k string) *regexp.Regexp { re.WriteString(`\E$`) return regexp.MustCompile(re.String()) } + +// matchKeys filters only matching keys. +// The returned boolean is whether the match pattern was valid +func matchKeys(keys []string, match string) ([]string, bool) { + re := patternRE(match) + if re == nil { + // Special case: the given pattern won't match anything or is invalid. + return nil, false + } + var res []string + for _, k := range keys { + if !re.MatchString(k) { + continue + } + res = append(res, k) + } + return res, true +} diff --git a/vendor/github.com/alicebob/miniredis/lua.go b/vendor/github.com/alicebob/miniredis/v2/lua.go similarity index 65% rename from vendor/github.com/alicebob/miniredis/lua.go rename to vendor/github.com/alicebob/miniredis/v2/lua.go index a338425b..32f727b0 100644 --- a/vendor/github.com/alicebob/miniredis/lua.go +++ b/vendor/github.com/alicebob/miniredis/v2/lua.go @@ -1,28 +1,44 @@ package miniredis import ( - redigo "github.com/gomodule/redigo/redis" - "github.com/yuin/gopher-lua" + "bufio" + "bytes" + "fmt" + "strings" - "github.com/alicebob/miniredis/server" + lua "github.com/yuin/gopher-lua" + + "github.com/alicebob/miniredis/v2/server" ) -func mkLuaFuncs(conn redigo.Conn) map[string]lua.LGFunction { +var luaRedisConstants = map[string]lua.LValue{ + "LOG_DEBUG": lua.LNumber(0), + "LOG_VERBOSE": lua.LNumber(1), + "LOG_NOTICE": lua.LNumber(2), + "LOG_WARNING": lua.LNumber(3), +} + +func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[string]lua.LValue) { mkCall := func(failFast bool) func(l *lua.LState) int { + // one server.Ctx for a single Lua run + pCtx := &connCtx{} + if getCtx(c).authenticated { + pCtx.authenticated = true + } + pCtx.nested = true + pCtx.selectedDB = getCtx(c).selectedDB + return func(l *lua.LState) int { top := l.GetTop() if top == 0 { l.Error(lua.LString("Please specify at least one argument for redis.call()"), 1) return 0 } - var args []interface{} + var args []string for i := 1; i <= top; i++ { switch a := l.Get(i).(type) { - // case lua.LBool: - // args[i-2] = a case lua.LNumber: - // value, _ := strconv.ParseFloat(lua.LVAsString(arg), 64) - args = append(args, float64(a)) + args = append(args, a.String()) case lua.LString: args = append(args, string(a)) 
default: @@ -30,16 +46,27 @@ func mkLuaFuncs(conn redigo.Conn) map[string]lua.LGFunction { return 0 } } - cmd, ok := args[0].(string) - if !ok { - l.Error(lua.LString("Unknown Redis command called from Lua script"), 1) + if len(args) == 0 { + l.Error(lua.LString(msgNotFromScripts), 1) return 0 } - res, err := conn.Do(cmd, args[1:]...) + + buf := &bytes.Buffer{} + wr := bufio.NewWriter(buf) + peer := server.NewPeer(wr) + peer.Ctx = pCtx + srv.Dispatch(peer, args) + wr.Flush() + + res, err := server.ParseReply(bufio.NewReader(buf)) if err != nil { if failFast { // call() mode - l.Error(lua.LString(err.Error()), 1) + if strings.Contains(err.Error(), "ERR unknown command") { + l.Error(lua.LString("Unknown Redis command called from Lua script"), 1) + } else { + l.Error(lua.LString(err.Error()), 1) + } return 0 } // pcall() mode @@ -53,14 +80,19 @@ func mkLuaFuncs(conn redigo.Conn) map[string]lua.LGFunction { switch r := res.(type) { case int64: l.Push(lua.LNumber(r)) + case int: + l.Push(lua.LNumber(r)) case []uint8: l.Push(lua.LString(string(r))) case []interface{}: l.Push(redisToLua(l, r)) case string: l.Push(lua.LString(r)) + case error: + l.Error(lua.LString(r.Error()), 1) + return 0 default: - panic("type not handled") + panic(fmt.Sprintf("type not handled (%T)", r)) } } return 1 @@ -71,14 +103,32 @@ func mkLuaFuncs(conn redigo.Conn) map[string]lua.LGFunction { "call": mkCall(true), "pcall": mkCall(false), "error_reply": func(l *lua.LState) int { - msg := l.CheckString(1) + v := l.Get(1) + msg, ok := v.(lua.LString) + if !ok { + l.Error(lua.LString("wrong number or type of arguments"), 1) + return 0 + } res := &lua.LTable{} res.RawSetString("err", lua.LString(msg)) l.Push(res) return 1 }, + "log": func(l *lua.LState) int { + level := l.CheckInt(1) + msg := l.CheckString(2) + _, _ = level, msg + // do nothing by default. To see logs uncomment: + // fmt.Printf("%v: %v", level, msg) + return 0 + }, "status_reply": func(l *lua.LState) int { - msg := l.CheckString(1) + v := l.Get(1) + msg, ok := v.(lua.LString) + if !ok { + l.Error(lua.LString("wrong number or type of arguments"), 1) + return 0 + } res := &lua.LTable{} res.RawSetString("ok", lua.LString(msg)) l.Push(res) @@ -98,7 +148,7 @@ func mkLuaFuncs(conn redigo.Conn) map[string]lua.LGFunction { // ignored return 1 }, - } + }, luaRedisConstants } func luaToRedis(l *lua.LState, c *server.Peer, value lua.LValue) { diff --git a/vendor/github.com/alicebob/miniredis/v2/metro/LICENSE b/vendor/github.com/alicebob/miniredis/v2/metro/LICENSE new file mode 100644 index 00000000..6243b617 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/metro/LICENSE @@ -0,0 +1,24 @@ +This package is a mechanical translation of the reference C++ code for +MetroHash, available at https://github.com/jandrewrogers/MetroHash + +The MIT License (MIT) + +Copyright (c) 2016 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alicebob/miniredis/v2/metro/README.md b/vendor/github.com/alicebob/miniredis/v2/metro/README.md new file mode 100644 index 00000000..07e4ee9f --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/metro/README.md @@ -0,0 +1 @@ +This is a partial copy of github.com/dgryski/go-metro. \ No newline at end of file diff --git a/vendor/github.com/alicebob/miniredis/v2/metro/metro64.go b/vendor/github.com/alicebob/miniredis/v2/metro/metro64.go new file mode 100644 index 00000000..5b3db9a9 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/metro/metro64.go @@ -0,0 +1,87 @@ +package metro + +import "encoding/binary" + +func Hash64(buffer []byte, seed uint64) uint64 { + + const ( + k0 = 0xD6D018F5 + k1 = 0xA2AA033B + k2 = 0x62992FC1 + k3 = 0x30BC5B29 + ) + + ptr := buffer + + hash := (seed + k2) * k0 + + if len(ptr) >= 32 { + v := [4]uint64{hash, hash, hash, hash} + + for len(ptr) >= 32 { + v[0] += binary.LittleEndian.Uint64(ptr[:8]) * k0 + v[0] = rotate_right(v[0], 29) + v[2] + v[1] += binary.LittleEndian.Uint64(ptr[8:16]) * k1 + v[1] = rotate_right(v[1], 29) + v[3] + v[2] += binary.LittleEndian.Uint64(ptr[16:24]) * k2 + v[2] = rotate_right(v[2], 29) + v[0] + v[3] += binary.LittleEndian.Uint64(ptr[24:32]) * k3 + v[3] = rotate_right(v[3], 29) + v[1] + ptr = ptr[32:] + } + + v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 37) * k1 + v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 37) * k0 + v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 37) * k1 + v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 37) * k0 + hash += v[0] ^ v[1] + } + + if len(ptr) >= 16 { + v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2) + v0 = rotate_right(v0, 29) * k3 + v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2) + v1 = rotate_right(v1, 29) * k3 + v0 ^= rotate_right(v0*k0, 21) + v1 + v1 ^= rotate_right(v1*k3, 21) + v0 + hash += v1 + ptr = ptr[16:] + } + + if len(ptr) >= 8 { + hash += binary.LittleEndian.Uint64(ptr[:8]) * k3 + ptr = ptr[8:] + hash ^= rotate_right(hash, 55) * k1 + } + + if len(ptr) >= 4 { + hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3 + hash ^= rotate_right(hash, 26) * k1 + ptr = ptr[4:] + } + + if len(ptr) >= 2 { + hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3 + ptr = ptr[2:] + hash ^= rotate_right(hash, 48) * k1 + } + + if len(ptr) >= 1 { + hash += uint64(ptr[0]) * k3 + hash ^= rotate_right(hash, 37) * k1 + } + + hash ^= rotate_right(hash, 28) + hash *= k0 + hash ^= rotate_right(hash, 29) + + return hash +} + +func Hash64Str(buffer string, seed uint64) uint64 { + return Hash64([]byte(buffer), seed) +} + +func rotate_right(v uint64, k uint) uint64 { + return (v >> k) | (v << (64 - k)) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/miniredis.go b/vendor/github.com/alicebob/miniredis/v2/miniredis.go new file mode 100644 index 00000000..8f78dea2 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/miniredis.go @@ -0,0 +1,639 @@ +// Package miniredis is a pure Go Redis test server, for use in Go unittests. 
+// There are no dependencies on system binaries, and every server you start +// will be empty. +// +// import "github.com/alicebob/miniredis/v2" +// +// Start a server with `s := miniredis.RunT(t)`, it'll be shutdown via a t.Cleanup(). +// Or do everything manual: `s, err := miniredis.Run(); defer s.Close()` +// +// Point your Redis client to `s.Addr()` or `s.Host(), s.Port()`. +// +// Set keys directly via s.Set(...) and similar commands, or use a Redis client. +// +// For direct use you can select a Redis database with either `s.Select(12); +// s.Get("foo")` or `s.DB(12).Get("foo")`. +// +package miniredis + +import ( + "context" + "crypto/tls" + "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "time" + + "github.com/alicebob/miniredis/v2/server" +) + +type hashKey map[string]string +type listKey []string +type setKey map[string]struct{} + +// RedisDB holds a single (numbered) Redis database. +type RedisDB struct { + master *Miniredis // pointer to the lock in Miniredis + id int // db id + keys map[string]string // Master map of keys with their type + stringKeys map[string]string // GET/SET &c. keys + hashKeys map[string]hashKey // MGET/MSET &c. keys + listKeys map[string]listKey // LPUSH &c. keys + setKeys map[string]setKey // SADD &c. keys + hllKeys map[string]*hll // PFADD &c. keys + sortedsetKeys map[string]sortedSet // ZADD &c. keys + streamKeys map[string]*streamKey // XADD &c. keys + ttl map[string]time.Duration // effective TTL values + keyVersion map[string]uint // used to watch values +} + +// Miniredis is a Redis server implementation. +type Miniredis struct { + sync.Mutex + srv *server.Server + port int + passwords map[string]string // username password + dbs map[int]*RedisDB + selectedDB int // DB id used in the direct Get(), Set() &c. + scripts map[string]string // sha1 -> lua src + signal *sync.Cond + now time.Time // time.Now() if not set. + subscribers map[*Subscriber]struct{} + rand *rand.Rand + Ctx context.Context + CtxCancel context.CancelFunc +} + +type txCmd func(*server.Peer, *connCtx) + +// database id + key combo +type dbKey struct { + db int + key string +} + +// connCtx has all state for a single connection. +type connCtx struct { + selectedDB int // selected DB + authenticated bool // auth enabled and a valid AUTH seen + transaction []txCmd // transaction callbacks. Or nil. + dirtyTransaction bool // any error during QUEUEing + watch map[dbKey]uint // WATCHed keys + subscriber *Subscriber // client is in PUBSUB mode if not nil + nested bool // this is called via Lua +} + +// NewMiniRedis makes a new, non-started, Miniredis object. +func NewMiniRedis() *Miniredis { + m := Miniredis{ + dbs: map[int]*RedisDB{}, + scripts: map[string]string{}, + subscribers: map[*Subscriber]struct{}{}, + } + m.Ctx, m.CtxCancel = context.WithCancel(context.Background()) + m.signal = sync.NewCond(&m) + return &m +} + +func newRedisDB(id int, m *Miniredis) RedisDB { + return RedisDB{ + id: id, + master: m, + keys: map[string]string{}, + stringKeys: map[string]string{}, + hashKeys: map[string]hashKey{}, + listKeys: map[string]listKey{}, + setKeys: map[string]setKey{}, + hllKeys: map[string]*hll{}, + sortedsetKeys: map[string]sortedSet{}, + streamKeys: map[string]*streamKey{}, + ttl: map[string]time.Duration{}, + keyVersion: map[string]uint{}, + } +} + +// Run creates and Start()s a Miniredis. +func Run() (*Miniredis, error) { + m := NewMiniRedis() + return m, m.Start() +} + +// Run creates and Start()s a Miniredis, TLS version. 
+func RunTLS(cfg *tls.Config) (*Miniredis, error) { + m := NewMiniRedis() + return m, m.StartTLS(cfg) +} + +// Tester is a minimal version of a testing.T +type Tester interface { + Fatalf(string, ...interface{}) + Cleanup(func()) +} + +// RunT start a new miniredis, pass it a testing.T. It also registers the cleanup after your test is done. +func RunT(t Tester) *Miniredis { + m := NewMiniRedis() + if err := m.Start(); err != nil { + t.Fatalf("could not start miniredis: %s", err) + // not reached + } + t.Cleanup(m.Close) + return m +} + +// Start starts a server. It listens on a random port on localhost. See also +// Addr(). +func (m *Miniredis) Start() error { + s, err := server.NewServer(fmt.Sprintf("127.0.0.1:%d", m.port)) + if err != nil { + return err + } + return m.start(s) +} + +// Start starts a server, TLS version. +func (m *Miniredis) StartTLS(cfg *tls.Config) error { + s, err := server.NewServerTLS(fmt.Sprintf("127.0.0.1:%d", m.port), cfg) + if err != nil { + return err + } + return m.start(s) +} + +// StartAddr runs miniredis with a given addr. Examples: "127.0.0.1:6379", +// ":6379", or "127.0.0.1:0" +func (m *Miniredis) StartAddr(addr string) error { + s, err := server.NewServer(addr) + if err != nil { + return err + } + return m.start(s) +} + +func (m *Miniredis) start(s *server.Server) error { + m.Lock() + defer m.Unlock() + m.srv = s + m.port = s.Addr().Port + + commandsConnection(m) + commandsGeneric(m) + commandsServer(m) + commandsString(m) + commandsHash(m) + commandsList(m) + commandsPubsub(m) + commandsSet(m) + commandsSortedSet(m) + commandsStream(m) + commandsTransaction(m) + commandsScripting(m) + commandsGeo(m) + commandsCluster(m) + commandsCommand(m) + commandsHll(m) + + return nil +} + +// Restart restarts a Close()d server on the same port. Values will be +// preserved. +func (m *Miniredis) Restart() error { + return m.Start() +} + +// Close shuts down a Miniredis. +func (m *Miniredis) Close() { + m.Lock() + + if m.srv == nil { + m.Unlock() + return + } + srv := m.srv + m.srv = nil + m.CtxCancel() + m.Unlock() + + // the OnDisconnect callbacks can lock m, so run Close() outside the lock. + srv.Close() + +} + +// RequireAuth makes every connection need to AUTH first. This is the old 'AUTH [password] command. +// Remove it by setting an empty string. +func (m *Miniredis) RequireAuth(pw string) { + m.RequireUserAuth("default", pw) +} + +// Add a username/password, for use with 'AUTH [username] [password]'. +// There are currently no access controls for commands implemented. +// Disable access for the user with an empty password. +func (m *Miniredis) RequireUserAuth(username, pw string) { + m.Lock() + defer m.Unlock() + if m.passwords == nil { + m.passwords = map[string]string{} + } + if pw == "" { + delete(m.passwords, username) + return + } + m.passwords[username] = pw +} + +// DB returns a DB by ID. +func (m *Miniredis) DB(i int) *RedisDB { + m.Lock() + defer m.Unlock() + return m.db(i) +} + +// get DB. No locks! +func (m *Miniredis) db(i int) *RedisDB { + if db, ok := m.dbs[i]; ok { + return db + } + db := newRedisDB(i, m) // main miniredis has our mutex. + m.dbs[i] = &db + return &db +} + +// SwapDB swaps DBs by IDs. +func (m *Miniredis) SwapDB(i, j int) bool { + m.Lock() + defer m.Unlock() + return m.swapDB(i, j) +} + +// swap DB. No locks! +func (m *Miniredis) swapDB(i, j int) bool { + db1 := m.db(i) + db2 := m.db(j) + + db1.id = j + db2.id = i + + m.dbs[i] = db2 + m.dbs[j] = db1 + + return true +} + +// Addr returns '127.0.0.1:12345'. 
Can be given to a Dial(). See also Host() +// and Port(), which return the same things. +func (m *Miniredis) Addr() string { + m.Lock() + defer m.Unlock() + return m.srv.Addr().String() +} + +// Host returns the host part of Addr(). +func (m *Miniredis) Host() string { + m.Lock() + defer m.Unlock() + return m.srv.Addr().IP.String() +} + +// Port returns the (random) port part of Addr(). +func (m *Miniredis) Port() string { + m.Lock() + defer m.Unlock() + return strconv.Itoa(m.srv.Addr().Port) +} + +// CommandCount returns the number of processed commands. +func (m *Miniredis) CommandCount() int { + m.Lock() + defer m.Unlock() + return int(m.srv.TotalCommands()) +} + +// CurrentConnectionCount returns the number of currently connected clients. +func (m *Miniredis) CurrentConnectionCount() int { + m.Lock() + defer m.Unlock() + return m.srv.ClientsLen() +} + +// TotalConnectionCount returns the number of client connections since server start. +func (m *Miniredis) TotalConnectionCount() int { + m.Lock() + defer m.Unlock() + return int(m.srv.TotalConnections()) +} + +// FastForward decreases all TTLs by the given duration. All TTLs <= 0 will be +// expired. +func (m *Miniredis) FastForward(duration time.Duration) { + m.Lock() + defer m.Unlock() + for _, db := range m.dbs { + db.fastForward(duration) + } +} + +// Server returns the underlying server to allow custom commands to be implemented +func (m *Miniredis) Server() *server.Server { + return m.srv +} + +// Dump returns a text version of the selected DB, usable for debugging. +func (m *Miniredis) Dump() string { + m.Lock() + defer m.Unlock() + + var ( + maxLen = 60 + indent = " " + db = m.db(m.selectedDB) + r = "" + v = func(s string) string { + suffix := "" + if len(s) > maxLen { + suffix = fmt.Sprintf("...(%d)", len(s)) + s = s[:maxLen-len(suffix)] + } + return fmt.Sprintf("%q%s", s, suffix) + } + ) + for _, k := range db.allKeys() { + r += fmt.Sprintf("- %s\n", k) + t := db.t(k) + switch t { + case "string": + r += fmt.Sprintf("%s%s\n", indent, v(db.stringKeys[k])) + case "hash": + for _, hk := range db.hashFields(k) { + r += fmt.Sprintf("%s%s: %s\n", indent, hk, v(db.hashGet(k, hk))) + } + case "list": + for _, lk := range db.listKeys[k] { + r += fmt.Sprintf("%s%s\n", indent, v(lk)) + } + case "set": + for _, mk := range db.setMembers(k) { + r += fmt.Sprintf("%s%s\n", indent, v(mk)) + } + case "zset": + for _, el := range db.ssetElements(k) { + r += fmt.Sprintf("%s%f: %s\n", indent, el.score, v(el.member)) + } + case "stream": + for _, entry := range db.streamKeys[k].entries { + r += fmt.Sprintf("%s%s\n", indent, entry.ID) + ev := entry.Values + for i := 0; i < len(ev)/2; i++ { + r += fmt.Sprintf("%s%s%s: %s\n", indent, indent, v(ev[2*i]), v(ev[2*i+1])) + } + } + case "hll": + for _, entry := range db.hllKeys { + r += fmt.Sprintf("%s%s\n", indent, v(string(entry.Bytes()))) + } + default: + r += fmt.Sprintf("%s(a %s, fixme!)\n", indent, t) + } + } + return r +} + +// SetTime sets the time against which EXPIREAT values are compared, and the +// time used in stream entry IDs. Will use time.Now() if this is not set. +func (m *Miniredis) SetTime(t time.Time) { + m.Lock() + defer m.Unlock() + m.now = t +} + +// make every command return this message. For example: +// LOADING Redis is loading the dataset in memory +// MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'. +// Clear it with an empty string. Don't add newlines. 
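// An illustrative call, using one of the messages listed above:
//
//	m.SetError("LOADING Redis is loading the dataset in memory")
//	// ... every command now fails with that error ...
//	m.SetError("") // restore normal behaviour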
+func (m *Miniredis) SetError(msg string) { + cb := server.Hook(nil) + if msg != "" { + cb = func(c *server.Peer, cmd string, args ...string) bool { + c.WriteError(msg) + return true + } + } + m.srv.SetPreHook(cb) +} + +// handleAuth returns false if connection has no access. It sends the reply. +func (m *Miniredis) handleAuth(c *server.Peer) bool { + if getCtx(c).nested { + return true + } + + m.Lock() + defer m.Unlock() + if len(m.passwords) == 0 { + return true + } + if !getCtx(c).authenticated { + c.WriteError("NOAUTH Authentication required.") + return false + } + return true +} + +// handlePubsub sends an error to the user if the connection is in PUBSUB mode. +// It'll return true if it did. +func (m *Miniredis) checkPubsub(c *server.Peer, cmd string) bool { + if getCtx(c).nested { + return false + } + + m.Lock() + defer m.Unlock() + + ctx := getCtx(c) + if ctx.subscriber == nil { + return false + } + + prefix := "ERR " + if strings.ToLower(cmd) == "exec" { + prefix = "EXECABORT Transaction discarded because of: " + } + c.WriteError(fmt.Sprintf( + "%sCan't execute '%s': only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT are allowed in this context", + prefix, + strings.ToLower(cmd), + )) + return true +} + +func getCtx(c *server.Peer) *connCtx { + if c.Ctx == nil { + c.Ctx = &connCtx{} + } + return c.Ctx.(*connCtx) +} + +func startTx(ctx *connCtx) { + ctx.transaction = []txCmd{} + ctx.dirtyTransaction = false +} + +func stopTx(ctx *connCtx) { + ctx.transaction = nil + unwatch(ctx) +} + +func inTx(ctx *connCtx) bool { + return ctx.transaction != nil +} + +func addTxCmd(ctx *connCtx, cb txCmd) { + ctx.transaction = append(ctx.transaction, cb) +} + +func watch(db *RedisDB, ctx *connCtx, key string) { + if ctx.watch == nil { + ctx.watch = map[dbKey]uint{} + } + ctx.watch[dbKey{db: db.id, key: key}] = db.keyVersion[key] // Can be 0. +} + +func unwatch(ctx *connCtx) { + ctx.watch = nil +} + +// setDirty can be called even when not in an tx. Is an no-op then. +func setDirty(c *server.Peer) { + if c.Ctx == nil { + // No transaction. Not relevant. + return + } + getCtx(c).dirtyTransaction = true +} + +func (m *Miniredis) addSubscriber(s *Subscriber) { + m.subscribers[s] = struct{}{} +} + +// closes and remove the subscriber. +func (m *Miniredis) removeSubscriber(s *Subscriber) { + _, ok := m.subscribers[s] + delete(m.subscribers, s) + if ok { + s.Close() + } +} + +func (m *Miniredis) publish(c, msg string) int { + n := 0 + for s := range m.subscribers { + n += s.Publish(c, msg) + } + return n +} + +// enter 'subscribed state', or return the existing one. +func (m *Miniredis) subscribedState(c *server.Peer) *Subscriber { + ctx := getCtx(c) + sub := ctx.subscriber + if sub != nil { + return sub + } + + sub = newSubscriber() + m.addSubscriber(sub) + + c.OnDisconnect(func() { + m.Lock() + m.removeSubscriber(sub) + m.Unlock() + }) + + ctx.subscriber = sub + + go monitorPublish(c, sub.publish) + go monitorPpublish(c, sub.ppublish) + + return sub +} + +// whenever the p?sub count drops to 0 subscribed state should be stopped, and +// all redis commands are allowed again. +func endSubscriber(m *Miniredis, c *server.Peer) { + ctx := getCtx(c) + if sub := ctx.subscriber; sub != nil { + m.removeSubscriber(sub) // will Close() the sub + } + ctx.subscriber = nil +} + +// Start a new pubsub subscriber. It can (un) subscribe to channels and +// patterns, and has a channel to get published messages. Close it with +// Close(). +// Does not close itself when there are no subscriptions left. 
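// A small sketch (channel name and message are illustrative; the message
// channel is unbuffered, so publish from another goroutine):
//
//	sub := m.NewSubscriber()
//	defer sub.Close()
//	sub.Subscribe("events")
//	go sub.Publish("events", "hello")
//	msg := <-sub.Messages() // PubsubMessage{Channel: "events", Message: "hello"}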
+func (m *Miniredis) NewSubscriber() *Subscriber { + sub := newSubscriber() + + m.Lock() + m.addSubscriber(sub) + m.Unlock() + + return sub +} + +func (m *Miniredis) allSubscribers() []*Subscriber { + var subs []*Subscriber + for s := range m.subscribers { + subs = append(subs, s) + } + return subs +} + +func (m *Miniredis) Seed(seed int) { + m.Lock() + defer m.Unlock() + + // m.rand is not safe for concurrent use. + m.rand = rand.New(rand.NewSource(int64(seed))) +} + +func (m *Miniredis) randIntn(n int) int { + if m.rand == nil { + return rand.Intn(n) + } + return m.rand.Intn(n) +} + +// shuffle shuffles a list of strings. Kinda. +func (m *Miniredis) shuffle(l []string) { + for range l { + i := m.randIntn(len(l)) + j := m.randIntn(len(l)) + l[i], l[j] = l[j], l[i] + } +} + +func (m *Miniredis) effectiveNow() time.Time { + if !m.now.IsZero() { + return m.now + } + return time.Now().UTC() +} + +// convert a unixtimestamp to a duration, to use an absolute time as TTL. +// d can be either time.Second or time.Millisecond. +func (m *Miniredis) at(i int, d time.Duration) time.Duration { + var ts time.Time + switch d { + case time.Millisecond: + ts = time.Unix(int64(i/1000), 1000000*int64(i%1000)) + case time.Second: + ts = time.Unix(int64(i), 0) + default: + panic("invalid time unit (d). Fixme!") + } + now := m.effectiveNow() + return ts.Sub(now) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/pubsub.go b/vendor/github.com/alicebob/miniredis/v2/pubsub.go new file mode 100644 index 00000000..bb31f80a --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/pubsub.go @@ -0,0 +1,240 @@ +package miniredis + +import ( + "regexp" + "sort" + "sync" + + "github.com/alicebob/miniredis/v2/server" +) + +// PubsubMessage is what gets broadcasted over pubsub channels. +type PubsubMessage struct { + Channel string + Message string +} + +type PubsubPmessage struct { + Pattern string + Channel string + Message string +} + +// Subscriber has the (p)subscriptions. +type Subscriber struct { + publish chan PubsubMessage + ppublish chan PubsubPmessage + channels map[string]struct{} + patterns map[string]*regexp.Regexp + mu sync.Mutex +} + +// Make a new subscriber. The channel is not buffered, so you will need to keep +// reading using Messages(). Use Close() when done, or unsubscribe. +func newSubscriber() *Subscriber { + return &Subscriber{ + publish: make(chan PubsubMessage), + ppublish: make(chan PubsubPmessage), + channels: map[string]struct{}{}, + patterns: map[string]*regexp.Regexp{}, + } +} + +// Close the listening channel +func (s *Subscriber) Close() { + close(s.publish) + close(s.ppublish) +} + +// Count the total number of channels and patterns +func (s *Subscriber) Count() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.count() +} + +func (s *Subscriber) count() int { + return len(s.channels) + len(s.patterns) +} + +// Subscribe to a channel. Returns the total number of (p)subscriptions after +// subscribing. +func (s *Subscriber) Subscribe(c string) int { + s.mu.Lock() + defer s.mu.Unlock() + + s.channels[c] = struct{}{} + return s.count() +} + +// Unsubscribe a channel. Returns the total number of (p)subscriptions after +// unsubscribing. +func (s *Subscriber) Unsubscribe(c string) int { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.channels, c) + return s.count() +} + +// Subscribe to a pattern. Returns the total number of (p)subscriptions after +// subscribing. 
+func (s *Subscriber) Psubscribe(pat string) int { + s.mu.Lock() + defer s.mu.Unlock() + + s.patterns[pat] = patternRE(pat) + return s.count() +} + +// Unsubscribe a pattern. Returns the total number of (p)subscriptions after +// unsubscribing. +func (s *Subscriber) Punsubscribe(pat string) int { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.patterns, pat) + return s.count() +} + +// List all subscribed channels, in alphabetical order +func (s *Subscriber) Channels() []string { + s.mu.Lock() + defer s.mu.Unlock() + + var cs []string + for c := range s.channels { + cs = append(cs, c) + } + sort.Strings(cs) + return cs +} + +// List all subscribed patterns, in alphabetical order +func (s *Subscriber) Patterns() []string { + s.mu.Lock() + defer s.mu.Unlock() + + var ps []string + for p := range s.patterns { + ps = append(ps, p) + } + sort.Strings(ps) + return ps +} + +// Publish a message. Will return return how often we sent the message (can be +// a match for a subscription and for a psubscription. +func (s *Subscriber) Publish(c, msg string) int { + s.mu.Lock() + defer s.mu.Unlock() + + found := 0 + +subs: + for sub := range s.channels { + if sub == c { + s.publish <- PubsubMessage{c, msg} + found++ + break subs + } + } + +pats: + for orig, pat := range s.patterns { + if pat != nil && pat.MatchString(c) { + s.ppublish <- PubsubPmessage{orig, c, msg} + found++ + break pats + } + } + + return found +} + +// The channel to read messages for this subscriber. Only for messages matching +// a SUBSCRIBE. +func (s *Subscriber) Messages() <-chan PubsubMessage { + return s.publish +} + +// The channel to read messages for this subscriber. Only for messages matching +// a PSUBSCRIBE. +func (s *Subscriber) Pmessages() <-chan PubsubPmessage { + return s.ppublish +} + +// List all pubsub channels. If `pat` isn't empty channels names must match the +// pattern. Channels are returned alphabetically. +func activeChannels(subs []*Subscriber, pat string) []string { + channels := map[string]struct{}{} + for _, s := range subs { + for c := range s.channels { + channels[c] = struct{}{} + } + } + + var cpat *regexp.Regexp + if pat != "" { + cpat = patternRE(pat) + } + + var cs []string + for k := range channels { + if cpat != nil && !cpat.MatchString(k) { + continue + } + cs = append(cs, k) + } + sort.Strings(cs) + return cs +} + +// Count all subscribed (not psubscribed) clients for the given channel +// pattern. Channels are returned alphabetically. +func countSubs(subs []*Subscriber, channel string) int { + n := 0 + for _, p := range subs { + for c := range p.channels { + if c == channel { + n++ + break + } + } + } + return n +} + +// Count the total of all client psubscriptions. 
+func countPsubs(subs []*Subscriber) int { + n := 0 + for _, p := range subs { + n += len(p.patterns) + } + return n +} + +func monitorPublish(conn *server.Peer, msgs <-chan PubsubMessage) { + for msg := range msgs { + conn.Block(func(c *server.Writer) { + c.WritePushLen(3) + c.WriteBulk("message") + c.WriteBulk(msg.Channel) + c.WriteBulk(msg.Message) + c.Flush() + }) + } +} + +func monitorPpublish(conn *server.Peer, msgs <-chan PubsubPmessage) { + for msg := range msgs { + conn.Block(func(c *server.Writer) { + c.WritePushLen(4) + c.WriteBulk("pmessage") + c.WriteBulk(msg.Pattern) + c.WriteBulk(msg.Channel) + c.WriteBulk(msg.Message) + c.Flush() + }) + } +} diff --git a/vendor/github.com/alicebob/miniredis/v2/redis.go b/vendor/github.com/alicebob/miniredis/v2/redis.go new file mode 100644 index 00000000..e870f2c3 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/redis.go @@ -0,0 +1,226 @@ +package miniredis + +import ( + "context" + "fmt" + "math/big" + "strings" + "sync" + "time" + + "github.com/alicebob/miniredis/v2/server" +) + +const ( + msgWrongType = "WRONGTYPE Operation against a key holding the wrong kind of value" + msgNotValidHllValue = "WRONGTYPE Key is not a valid HyperLogLog string value." + msgInvalidInt = "ERR value is not an integer or out of range" + msgInvalidFloat = "ERR value is not a valid float" + msgInvalidMinMax = "ERR min or max is not a float" + msgInvalidRangeItem = "ERR min or max not valid string range item" + msgInvalidTimeout = "ERR timeout is not a float or out of range" + msgSyntaxError = "ERR syntax error" + msgKeyNotFound = "ERR no such key" + msgOutOfRange = "ERR index out of range" + msgInvalidCursor = "ERR invalid cursor" + msgXXandNX = "ERR XX and NX options at the same time are not compatible" + msgNegTimeout = "ERR timeout is negative" + msgInvalidSETime = "ERR invalid expire time in set" + msgInvalidSETEXTime = "ERR invalid expire time in setex" + msgInvalidPSETEXTime = "ERR invalid expire time in psetex" + msgInvalidKeysNumber = "ERR Number of keys can't be greater than number of args" + msgNegativeKeysNumber = "ERR Number of keys can't be negative" + msgFScriptUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP." + msgFPubsubUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try PUBSUB HELP." + msgScriptFlush = "ERR SCRIPT FLUSH only support SYNC|ASYNC option" + msgSingleElementPair = "ERR INCR option supports a single increment-element pair" + msgInvalidStreamID = "ERR Invalid stream ID specified as stream command argument" + msgStreamIDTooSmall = "ERR The ID specified in XADD is equal or smaller than the target stream top item" + msgStreamIDZero = "ERR The ID specified in XADD must be greater than 0-0" + msgNoScriptFound = "NOSCRIPT No matching script. Please use EVAL." + msgUnsupportedUnit = "ERR unsupported unit provided. please use m, km, ft, mi" + msgNotFromScripts = "This Redis command is not allowed from scripts" + msgXreadUnbalanced = "ERR Unbalanced XREAD list of streams: for each stream key an ID or '$' must be specified." + msgXgroupKeyNotFound = "ERR The XGROUP subcommand requires the key to exist. Note that for CREATE you may want to use the MKSTREAM option to create an empty stream automatically." + msgXtrimInvalidStrategy = "ERR unsupported XTRIM strategy. 
Please use MAXLEN, MINID" + msgXtrimInvalidMaxLen = "ERR value is not an integer or out of range" + msgXtrimInvalidLimit = "ERR syntax error, LIMIT cannot be used without the special ~ option" +) + +func errWrongNumber(cmd string) string { + return fmt.Sprintf("ERR wrong number of arguments for '%s' command", strings.ToLower(cmd)) +} + +func errLuaParseError(err error) string { + return fmt.Sprintf("ERR Error compiling script (new function): %s", err.Error()) +} + +func errReadgroup(key, group string) error { + return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s'", key, group) +} + +func errXreadgroup(key, group string) error { + return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s' in XREADGROUP with GROUP option", key, group) +} + +// withTx wraps the non-argument-checking part of command handling code in +// transaction logic. +func withTx( + m *Miniredis, + c *server.Peer, + cb txCmd, +) { + ctx := getCtx(c) + + if ctx.nested { + // this is a call via Lua's .call(). It's already locked. + cb(c, ctx) + m.signal.Broadcast() + return + } + + if inTx(ctx) { + addTxCmd(ctx, cb) + c.WriteInline("QUEUED") + return + } + m.Lock() + cb(c, ctx) + // done, wake up anyone who waits on anything. + m.signal.Broadcast() + m.Unlock() +} + +// blockCmd is executed returns whether it is done +type blockCmd func(*server.Peer, *connCtx) bool + +// blocking keeps trying a command until the callback returns true. Calls +// onTimeout after the timeout (or when we call this in a transaction). +func blocking( + m *Miniredis, + c *server.Peer, + timeout time.Duration, + cb blockCmd, + onTimeout func(*server.Peer), +) { + var ( + ctx = getCtx(c) + ) + if inTx(ctx) { + addTxCmd(ctx, func(c *server.Peer, ctx *connCtx) { + if !cb(c, ctx) { + onTimeout(c) + } + }) + c.WriteInline("QUEUED") + return + } + + localCtx, cancel := context.WithCancel(m.Ctx) + defer cancel() + timedOut := false + if timeout != 0 { + go setCondTimer(localCtx, m.signal, &timedOut, timeout) + } + go func() { + <-localCtx.Done() + m.signal.Broadcast() // main loop might miss this signal + }() + + m.Lock() + defer m.Unlock() + for { + done := cb(c, ctx) + if done { + return + } + + if m.Ctx.Err() != nil { + return + } + if timedOut { + onTimeout(c) + return + } + + m.signal.Wait() + } +} + +func setCondTimer(ctx context.Context, sig *sync.Cond, timedOut *bool, timeout time.Duration) { + dl := time.NewTimer(timeout) + defer dl.Stop() + select { + case <-dl.C: + sig.L.Lock() // for timedOut + *timedOut = true + sig.Broadcast() // main loop might miss this signal + sig.L.Unlock() + case <-ctx.Done(): + } +} + +// formatBig formats a float the way redis does +func formatBig(v *big.Float) string { + // Format with %f and strip trailing 0s. + if v.IsInf() { + return "inf" + } + // if math.IsInf(v, -1) { + // return "-inf" + // } + return stripZeros(fmt.Sprintf("%.17f", v)) +} + +func stripZeros(sv string) string { + for strings.Contains(sv, ".") { + if sv[len(sv)-1] != '0' { + break + } + // Remove trailing 0s. + sv = sv[:len(sv)-1] + // Ends with a '.'. + if sv[len(sv)-1] == '.' { + sv = sv[:len(sv)-1] + break + } + } + return sv +} + +// redisRange gives Go offsets for something l long with start/end in +// Redis semantics. Both start and end can be negative. +// Used for string range and list range things. +// The results can be used as: v[start:end] +// Note that GETRANGE (on a string key) never returns an empty string when end +// is a large negative number. 
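// Worked examples, derived from the semantics described above (values are
// illustrative):
//
//	redisRange(10, 0, -1, false)   // -> (0, 10): the whole range
//	redisRange(10, -3, -1, false)  // -> (7, 10): the last three elements
//	redisRange(10, 5, 100, false)  // -> (5, 10): end clamped to the length
//	redisRange(10, 0, -100, true)  // -> (0, 1):  string semantics, never empty
//	redisRange(10, 0, -100, false) // -> (0, 0):  list semantics, empty range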
+func redisRange(l, start, end int, stringSymantics bool) (int, int) { + if start < 0 { + start = l + start + if start < 0 { + start = 0 + } + } + if start > l { + start = l + } + + if end < 0 { + end = l + end + if end < 0 { + end = -1 + if stringSymantics { + end = 0 + } + } + } + end++ // end argument is inclusive in Redis. + if end > l { + end = l + } + + if end < start { + return 0, 0 + } + return start, end +} diff --git a/vendor/github.com/alicebob/miniredis/server/Makefile b/vendor/github.com/alicebob/miniredis/v2/server/Makefile similarity index 100% rename from vendor/github.com/alicebob/miniredis/server/Makefile rename to vendor/github.com/alicebob/miniredis/v2/server/Makefile diff --git a/vendor/github.com/alicebob/miniredis/server/proto.go b/vendor/github.com/alicebob/miniredis/v2/server/proto.go similarity index 53% rename from vendor/github.com/alicebob/miniredis/server/proto.go rename to vendor/github.com/alicebob/miniredis/v2/server/proto.go index 27e62d4f..d09d16a1 100644 --- a/vendor/github.com/alicebob/miniredis/server/proto.go +++ b/vendor/github.com/alicebob/miniredis/v2/server/proto.go @@ -82,3 +82,74 @@ func readString(rd *bufio.Reader) (string, error) { return string(buf[:length]), nil } } + +// parse a reply +func ParseReply(rd *bufio.Reader) (interface{}, error) { + line, err := rd.ReadString('\n') + if err != nil { + return nil, err + } + if len(line) < 3 { + return nil, ErrProtocol + } + + switch line[0] { + default: + return nil, ErrProtocol + case '+': + // +: simple string + return string(line[1 : len(line)-2]), nil + case '-': + // -: errors + return nil, errors.New(string(line[1 : len(line)-2])) + case ':': + // :: integer + v := line[1 : len(line)-2] + if v == "" { + return 0, nil + } + n, err := strconv.Atoi(v) + if err != nil { + return nil, ErrProtocol + } + return n, nil + case '$': + // bulk strings are: `$5\r\nhello\r\n` + length, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return "", err + } + if length < 0 { + // -1 is a nil response + return nil, nil + } + var ( + buf = make([]byte, length+2) + pos = 0 + ) + for pos < length+2 { + n, err := rd.Read(buf[pos:]) + if err != nil { + return "", err + } + pos += n + } + return string(buf[:length]), nil + case '*': + // array + l, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return nil, ErrProtocol + } + // l can be -1 + var fields []interface{} + for ; l > 0; l-- { + s, err := ParseReply(rd) + if err != nil { + return nil, err + } + fields = append(fields, s) + } + return fields, nil + } +} diff --git a/vendor/github.com/alicebob/miniredis/v2/server/server.go b/vendor/github.com/alicebob/miniredis/v2/server/server.go new file mode 100644 index 00000000..60e391f2 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/server/server.go @@ -0,0 +1,487 @@ +package server + +import ( + "bufio" + "crypto/tls" + "fmt" + "math" + "net" + "strings" + "sync" + "unicode" +) + +func errUnknownCommand(cmd string, args []string) string { + s := fmt.Sprintf("ERR unknown command `%s`, with args beginning with: ", cmd) + if len(args) > 20 { + args = args[:20] + } + for _, a := range args { + s += fmt.Sprintf("`%s`, ", a) + } + return s +} + +// Cmd is what Register expects +type Cmd func(c *Peer, cmd string, args []string) + +type DisconnectHandler func(c *Peer) + +// Hook is can be added to run before every cmd. Return true if the command is done. 
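// For example (illustrative), a pre-hook registered with SetPreHook (defined
// below) that short-circuits PING; note that commands arrive upper-cased:
//
//	srv.SetPreHook(func(p *Peer, cmd string, args ...string) bool {
//		if cmd == "PING" {
//			p.WriteInline("PONG")
//			return true // handled, skip normal dispatch
//		}
//		return false
//	})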
+type Hook func(*Peer, string, ...string) bool + +// Server is a simple redis server +type Server struct { + l net.Listener + cmds map[string]Cmd + preHook Hook + peers map[net.Conn]struct{} + mu sync.Mutex + wg sync.WaitGroup + infoConns int + infoCmds int +} + +// NewServer makes a server listening on addr. Close with .Close(). +func NewServer(addr string) (*Server, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + return newServer(l), nil +} + +func NewServerTLS(addr string, cfg *tls.Config) (*Server, error) { + l, err := tls.Listen("tcp", addr, cfg) + if err != nil { + return nil, err + } + return newServer(l), nil +} + +func newServer(l net.Listener) *Server { + s := Server{ + cmds: map[string]Cmd{}, + peers: map[net.Conn]struct{}{}, + l: l, + } + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.serve(l) + + s.mu.Lock() + for c := range s.peers { + c.Close() + } + s.mu.Unlock() + }() + return &s +} + +// (un)set a hook which is ran before every call. It returns true if the command is done. +func (s *Server) SetPreHook(h Hook) { + s.mu.Lock() + s.preHook = h + s.mu.Unlock() +} + +func (s *Server) serve(l net.Listener) { + for { + conn, err := l.Accept() + if err != nil { + return + } + s.ServeConn(conn) + } +} + +// ServeConn handles a net.Conn. Nice with net.Pipe() +func (s *Server) ServeConn(conn net.Conn) { + s.wg.Add(1) + s.mu.Lock() + s.peers[conn] = struct{}{} + s.infoConns++ + s.mu.Unlock() + + go func() { + defer s.wg.Done() + defer conn.Close() + + s.servePeer(conn) + + s.mu.Lock() + delete(s.peers, conn) + s.mu.Unlock() + }() +} + +// Addr has the net.Addr struct +func (s *Server) Addr() *net.TCPAddr { + s.mu.Lock() + defer s.mu.Unlock() + if s.l == nil { + return nil + } + return s.l.Addr().(*net.TCPAddr) +} + +// Close a server started with NewServer. It will wait until all clients are +// closed. +func (s *Server) Close() { + s.mu.Lock() + if s.l != nil { + s.l.Close() + } + s.l = nil + s.mu.Unlock() + + s.wg.Wait() +} + +// Register a command. It can't have been registered before. Safe to call on a +// running server. +func (s *Server) Register(cmd string, f Cmd) error { + s.mu.Lock() + defer s.mu.Unlock() + cmd = strings.ToUpper(cmd) + if _, ok := s.cmds[cmd]; ok { + return fmt.Errorf("command already registered: %s", cmd) + } + s.cmds[cmd] = f + return nil +} + +func (s *Server) servePeer(c net.Conn) { + r := bufio.NewReader(c) + peer := &Peer{ + w: bufio.NewWriter(c), + } + defer func() { + for _, f := range peer.onDisconnect { + f() + } + }() + + for { + args, err := readArray(r) + if err != nil { + return + } + s.Dispatch(peer, args) + peer.Flush() + + s.mu.Lock() + closed := peer.closed + s.mu.Unlock() + if closed { + c.Close() + } + } +} + +func (s *Server) Dispatch(c *Peer, args []string) { + cmd, args := args[0], args[1:] + cmdUp := strings.ToUpper(cmd) + s.mu.Lock() + h := s.preHook + s.mu.Unlock() + if h != nil { + if h(c, cmdUp, args...) 
{ + return + } + } + + s.mu.Lock() + cb, ok := s.cmds[cmdUp] + s.mu.Unlock() + if !ok { + c.WriteError(errUnknownCommand(cmd, args)) + return + } + + s.mu.Lock() + s.infoCmds++ + s.mu.Unlock() + cb(c, cmdUp, args) +} + +// TotalCommands is total (known) commands since this the server started +func (s *Server) TotalCommands() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.infoCmds +} + +// ClientsLen gives the number of connected clients right now +func (s *Server) ClientsLen() int { + s.mu.Lock() + defer s.mu.Unlock() + return len(s.peers) +} + +// TotalConnections give the number of clients connected since the server +// started, including the currently connected ones +func (s *Server) TotalConnections() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.infoConns +} + +// Peer is a client connected to the server +type Peer struct { + w *bufio.Writer + closed bool + Resp3 bool + Ctx interface{} // anything goes, server won't touch this + onDisconnect []func() // list of callbacks + mu sync.Mutex // for Block() +} + +func NewPeer(w *bufio.Writer) *Peer { + return &Peer{ + w: w, + } +} + +// Flush the write buffer. Called automatically after every redis command +func (c *Peer) Flush() { + c.mu.Lock() + defer c.mu.Unlock() + c.w.Flush() +} + +// Close the client connection after the current command is done. +func (c *Peer) Close() { + c.mu.Lock() + defer c.mu.Unlock() + c.closed = true +} + +// Register a function to execute on disconnect. There can be multiple +// functions registered. +func (c *Peer) OnDisconnect(f func()) { + c.onDisconnect = append(c.onDisconnect, f) +} + +// issue multiple calls, guarded with a mutex +func (c *Peer) Block(f func(*Writer)) { + c.mu.Lock() + defer c.mu.Unlock() + f(&Writer{c.w, c.Resp3}) +} + +// WriteError writes a redis 'Error' +func (c *Peer) WriteError(e string) { + c.Block(func(w *Writer) { + w.WriteError(e) + }) +} + +// WriteInline writes a redis inline string +func (c *Peer) WriteInline(s string) { + c.Block(func(w *Writer) { + w.WriteInline(s) + }) +} + +// WriteOK write the inline string `OK` +func (c *Peer) WriteOK() { + c.WriteInline("OK") +} + +// WriteBulk writes a bulk string +func (c *Peer) WriteBulk(s string) { + c.Block(func(w *Writer) { + w.WriteBulk(s) + }) +} + +// WriteNull writes a redis Null element +func (c *Peer) WriteNull() { + c.Block(func(w *Writer) { + w.WriteNull() + }) +} + +// WriteLen starts an array with the given length +func (c *Peer) WriteLen(n int) { + c.Block(func(w *Writer) { + w.WriteLen(n) + }) +} + +// WriteMapLen starts a map with the given length (number of keys) +func (c *Peer) WriteMapLen(n int) { + c.Block(func(w *Writer) { + w.WriteMapLen(n) + }) +} + +// WriteSetLen starts a set with the given length (number of elements) +func (c *Peer) WriteSetLen(n int) { + c.Block(func(w *Writer) { + w.WriteSetLen(n) + }) +} + +// WritePushLen starts a push-data array with the given length +func (c *Peer) WritePushLen(n int) { + c.Block(func(w *Writer) { + w.WritePushLen(n) + }) +} + +// WriteInt writes an integer +func (c *Peer) WriteInt(n int) { + c.Block(func(w *Writer) { + w.WriteInt(n) + }) +} + +// WriteFloat writes a float +func (c *Peer) WriteFloat(n float64) { + c.Block(func(w *Writer) { + w.WriteFloat(n) + }) +} + +// WriteRaw writes a raw redis response +func (c *Peer) WriteRaw(s string) { + c.Block(func(w *Writer) { + w.WriteRaw(s) + }) +} + +// WriteStrings is a helper to (bulk)write a string list +func (c *Peer) WriteStrings(strs []string) { + c.Block(func(w *Writer) { + w.WriteStrings(strs) + }) 
+} + +func toInline(s string) string { + return strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return ' ' + } + return r + }, s) +} + +// A Writer is given to the callback in Block() +type Writer struct { + w *bufio.Writer + resp3 bool +} + +// WriteError writes a redis 'Error' +func (w *Writer) WriteError(e string) { + fmt.Fprintf(w.w, "-%s\r\n", toInline(e)) +} + +func (w *Writer) WriteLen(n int) { + fmt.Fprintf(w.w, "*%d\r\n", n) +} + +func (w *Writer) WriteMapLen(n int) { + if w.resp3 { + fmt.Fprintf(w.w, "%%%d\r\n", n) + return + } + w.WriteLen(n * 2) +} + +func (w *Writer) WriteSetLen(n int) { + if w.resp3 { + fmt.Fprintf(w.w, "~%d\r\n", n) + return + } + w.WriteLen(n) +} + +func (w *Writer) WritePushLen(n int) { + if w.resp3 { + fmt.Fprintf(w.w, ">%d\r\n", n) + return + } + w.WriteLen(n) +} + +// WriteBulk writes a bulk string +func (w *Writer) WriteBulk(s string) { + fmt.Fprintf(w.w, "$%d\r\n%s\r\n", len(s), s) +} + +// WriteStrings writes a list of strings (bulk) +func (w *Writer) WriteStrings(strs []string) { + w.WriteLen(len(strs)) + for _, s := range strs { + w.WriteBulk(s) + } +} + +// WriteInt writes an integer +func (w *Writer) WriteInt(n int) { + fmt.Fprintf(w.w, ":%d\r\n", n) +} + +// WriteFloat writes a float +func (w *Writer) WriteFloat(n float64) { + if w.resp3 { + fmt.Fprintf(w.w, ",%s\r\n", formatFloat(n)) + return + } + w.WriteBulk(formatFloat(n)) +} + +// WriteNull writes a redis Null element +func (w *Writer) WriteNull() { + if w.resp3 { + fmt.Fprint(w.w, "_\r\n") + return + } + fmt.Fprintf(w.w, "$-1\r\n") +} + +// WriteInline writes a redis inline string +func (w *Writer) WriteInline(s string) { + fmt.Fprintf(w.w, "+%s\r\n", toInline(s)) +} + +// WriteRaw writes a raw redis response +func (w *Writer) WriteRaw(s string) { + fmt.Fprint(w.w, s) +} + +func (w *Writer) Flush() { + w.w.Flush() +} + +// formatFloat formats a float the way redis does (sort-of) +func formatFloat(v float64) string { + if math.IsInf(v, 1) { + return "inf" + } + if math.IsInf(v, -1) { + return "-inf" + } + return stripZeros(fmt.Sprintf("%.12f", v)) +} + +func stripZeros(sv string) string { + for strings.Contains(sv, ".") { + if sv[len(sv)-1] != '0' { + break + } + // Remove trailing 0s. + sv = sv[:len(sv)-1] + // Ends with a '.'. + if sv[len(sv)-1] == '.' { + sv = sv[:len(sv)-1] + break + } + } + return sv +} diff --git a/vendor/github.com/alicebob/miniredis/sorted_set.go b/vendor/github.com/alicebob/miniredis/v2/sorted_set.go similarity index 98% rename from vendor/github.com/alicebob/miniredis/sorted_set.go rename to vendor/github.com/alicebob/miniredis/v2/sorted_set.go index 9b1894d8..96ebd5d7 100644 --- a/vendor/github.com/alicebob/miniredis/sorted_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/sorted_set.go @@ -10,7 +10,8 @@ import ( type direction int const ( - asc direction = iota + unsorted direction = iota + asc desc ) diff --git a/vendor/github.com/alicebob/miniredis/v2/stream.go b/vendor/github.com/alicebob/miniredis/v2/stream.go new file mode 100644 index 00000000..0c75f8ab --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/stream.go @@ -0,0 +1,349 @@ +// Basic stream implementation. + +package miniredis + +import ( + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "time" +) + +// a Stream is a list of entries, lowest ID (oldest) first, and all "groups". +type streamKey struct { + entries []StreamEntry + groups map[string]*streamGroup +} + +// a StreamEntry is an entry in a stream. The ID is always of the form +// "123-123". 
+// Values is an ordered list of key-value pairs. +type StreamEntry struct { + ID string + Values []string +} + +type streamGroup struct { + stream *streamKey + lastID string + pending []pendingEntry + consumers map[string]consumer +} + +type consumer struct { + // TODO: "last seen" timestamp +} + +type pendingEntry struct { + id string + consumer string + deliveryCount int + lastDelivery time.Time +} + +func newStreamKey() *streamKey { + return &streamKey{ + groups: map[string]*streamGroup{}, + } +} + +func (s *streamKey) generateID(now time.Time) string { + ts := uint64(now.UnixNano()) / 1_000_000 + + lastID := s.lastID() + + next := fmt.Sprintf("%d-%d", ts, 0) + if streamCmp(lastID, next) == -1 { + return next + } + last, _ := parseStreamID(lastID) + return fmt.Sprintf("%d-%d", last[0], last[1]+1) +} + +func (s *streamKey) lastID() string { + if len(s.entries) == 0 { + return "0-0" + } + + return s.entries[len(s.entries)-1].ID +} + +func parseStreamID(id string) ([2]uint64, error) { + var ( + res [2]uint64 + err error + ) + parts := strings.SplitN(id, "-", 2) + res[0], err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return res, errors.New(msgInvalidStreamID) + } + if len(parts) == 2 { + res[1], err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return res, errors.New(msgInvalidStreamID) + } + } + return res, nil +} + +// compares two stream IDs (of the full format: "123-123"). Returns: -1, 0, 1 +// The given IDs should be valid stream IDs. +func streamCmp(a, b string) int { + ap, _ := parseStreamID(a) + bp, _ := parseStreamID(b) + + switch { + case ap[0] < bp[0]: + return -1 + case ap[0] > bp[0]: + return 1 + case ap[1] < bp[1]: + return -1 + case ap[1] > bp[1]: + return 1 + default: + return 0 + } +} + +// formatStreamID makes a full id ("42-42") out of a partial one ("42") +func formatStreamID(id string) (string, error) { + var ts [2]uint64 + parts := strings.SplitN(id, "-", 2) + + if len(parts) > 0 { + p, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return "", errInvalidEntryID + } + ts[0] = p + } + if len(parts) > 1 { + p, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return "", errInvalidEntryID + } + ts[1] = p + } + return fmt.Sprintf("%d-%d", ts[0], ts[1]), nil +} + +func formatStreamRangeBound(id string, start bool, reverse bool) (string, error) { + if id == "-" { + return "0-0", nil + } + + if id == "+" { + return fmt.Sprintf("%d-%d", uint64(math.MaxUint64), uint64(math.MaxUint64)), nil + } + + if id == "0" { + return "0-0", nil + } + + parts := strings.Split(id, "-") + if len(parts) == 2 { + return formatStreamID(id) + } + + // Incomplete IDs case + ts, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return "", errInvalidEntryID + } + + if (!start && !reverse) || (start && reverse) { + return fmt.Sprintf("%d-%d", ts, uint64(math.MaxUint64)), nil + } + + return fmt.Sprintf("%d-%d", ts, 0), nil +} + +func reversedStreamEntries(o []StreamEntry) []StreamEntry { + newStream := make([]StreamEntry, len(o)) + for i, e := range o { + newStream[len(o)-i-1] = e + } + return newStream +} + +func (s *streamKey) createGroup(group, id string) error { + if _, ok := s.groups[group]; ok { + return errors.New("BUSYGROUP Consumer Group name already exists") + } + + if id == "$" { + id = s.lastID() + } + s.groups[group] = &streamGroup{ + stream: s, + lastID: id, + consumers: map[string]consumer{}, + } + return nil +} + +// streamAdd adds an entry to a stream. Returns the new entry ID. 
+// If id is empty or "*" the ID will be generated automatically. +// `values` should have an even length. +func (s *streamKey) add(entryID string, values []string, now time.Time) (string, error) { + if entryID == "" || entryID == "*" { + entryID = s.generateID(now) + } + + entryID, err := formatStreamID(entryID) + if err != nil { + return "", err + } + if entryID == "0-0" { + return "", errors.New(msgStreamIDZero) + } + if streamCmp(s.lastID(), entryID) != -1 { + return "", errors.New(msgStreamIDTooSmall) + } + + s.entries = append(s.entries, StreamEntry{ + ID: entryID, + Values: values, + }) + return entryID, nil +} + +func (s *streamKey) trim(n int) { + if len(s.entries) > n { + s.entries = s.entries[len(s.entries)-n:] + } +} + +// all entries after "id" +func (s *streamKey) after(id string) []StreamEntry { + pos := sort.Search(len(s.entries), func(i int) bool { + return streamCmp(id, s.entries[i].ID) < 0 + }) + return s.entries[pos:] +} + +// get a stream entry by ID +// Also returns the position in the entries slice, if found. +func (s *streamKey) get(id string) (int, *StreamEntry) { + pos := sort.Search(len(s.entries), func(i int) bool { + return streamCmp(id, s.entries[i].ID) <= 0 + }) + if len(s.entries) <= pos || s.entries[pos].ID != id { + return 0, nil + } + return pos, &s.entries[pos] +} + +func (g *streamGroup) readGroup( + now time.Time, + consumerID, + id string, + count int, + noack bool, +) []StreamEntry { + if id == ">" { + // undelivered messages + msgs := g.stream.after(g.lastID) + if len(msgs) == 0 { + return nil + } + + if count > 0 && len(msgs) > count { + msgs = msgs[:count] + } + + if !noack { + for _, msg := range msgs { + g.pending = append(g.pending, pendingEntry{ + id: msg.ID, + consumer: consumerID, + deliveryCount: 1, + lastDelivery: now, + }) + } + } + g.consumers[consumerID] = consumer{} + g.lastID = msgs[len(msgs)-1].ID + return msgs + } + + // re-deliver messages from the pending list. + // con := gr.consumers[consumerID] + msgs := g.pendingAfter(id) + var res []StreamEntry + for i, p := range msgs { + if p.consumer != consumerID { + continue + } + _, entry := g.stream.get(p.id) + // not found. Weird? + if entry == nil { + continue + } + p.deliveryCount += 1 + p.lastDelivery = now + msgs[i] = p + res = append(res, *entry) + } + return res +} + +func (g *streamGroup) ack(ids []string) (int, error) { + count := 0 + for _, id := range ids { + if _, err := parseStreamID(id); err != nil { + return 0, errors.New(msgInvalidStreamID) + } + + pos := sort.Search(len(g.pending), func(i int) bool { + return streamCmp(id, g.pending[i].id) <= 0 + }) + if len(g.pending) <= pos || g.pending[pos].id != id { + continue + } + + g.pending = append(g.pending[:pos], g.pending[pos+1:]...) + count++ + } + return count, nil +} + +func (s *streamKey) delete(ids []string) (int, error) { + count := 0 + for _, id := range ids { + if _, err := parseStreamID(id); err != nil { + return 0, errors.New(msgInvalidStreamID) + } + + i, entry := s.get(id) + if entry == nil { + continue + } + + s.entries = append(s.entries[:i], s.entries[i+1:]...) 
+ count++ + } + return count, nil +} + +func (g *streamGroup) pendingAfter(id string) []pendingEntry { + pos := sort.Search(len(g.pending), func(i int) bool { + return streamCmp(id, g.pending[i].id) < 0 + }) + return g.pending[pos:] +} + +func (g *streamGroup) pendingCount(consumer string) int { + n := 0 + for _, p := range g.pending { + if p.consumer == consumer { + n++ + } + } + return n +} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea88..00000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 2fd8693c..792b4a60 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -1,7 +1,7 @@ # xxhash -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) xxhash is a Go implementation of the 64-bit [xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a @@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes' - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod deleted file mode 100644 index 49f67608..00000000 --- a/vendor/github.com/cespare/xxhash/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/cespare/xxhash/v2 - -go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum deleted file mode 100644 index e69de29b..00000000 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index db0b35fb..15c835d5 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index d580e32a..be8db5bf 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -6,7 +6,7 @@ // Register allocation: // AX h -// CX pointer to advance through b +// SI pointer to advance through b // DX n // BX loop end // R8 v1, k1 @@ -16,39 +16,39 @@ // R12 tmp // R13 prime1v // R14 prime2v -// R15 prime4v +// DI prime4v -// round reads from and advances the buffer pointer in CX. +// round reads from and advances the buffer pointer in SI. 
// It assumes that R13 has prime1v and R14 has prime2v. #define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ IMULQ R14, R12 \ ADDQ R12, r \ ROLQ $31, r \ IMULQ R13, r // mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define mergeRound(acc, val) \ IMULQ R14, val \ ROLQ $31, val \ IMULQ R13, val \ XORQ val, acc \ IMULQ R13, acc \ - ADDQ R15, acc + ADDQ DI, acc // func Sum64(b []byte) uint64 TEXT ·Sum64(SB), NOSPLIT, $0-32 // Load fixed primes. MOVQ ·prime1v(SB), R13 MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·prime4v(SB), DI // Load slice. - MOVQ b_base+0(FP), CX + MOVQ b_base+0(FP), SI MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + LEAQ (SI)(DX*1), BX // The first loop limit will be len(b)-32. SUBQ $32, BX @@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32 XORQ R11, R11 SUBQ R13, R11 - // Loop until CX > BX. + // Loop until SI > BX. blockLoop: round(R8) round(R9) round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop MOVQ R8, AX @@ -100,16 +100,16 @@ noBlocks: afterBlocks: ADDQ DX, AX - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, BX - CMPQ CX, BX + CMPQ SI, BX JG fourByte wordLoop: // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX + MOVQ (SI), R8 + ADDQ $8, SI IMULQ R14, R8 ROLQ $31, R8 IMULQ R13, R8 @@ -117,18 +117,18 @@ wordLoop: XORQ R8, AX ROLQ $27, AX IMULQ R13, AX - ADDQ R15, AX + ADDQ DI, AX - CMPQ CX, BX + CMPQ SI, BX JLE wordLoop fourByte: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JG singles - MOVL (CX), R8 - ADDQ $4, CX + MOVL (SI), R8 + ADDQ $4, SI IMULQ R13, R8 XORQ R8, AX @@ -138,19 +138,19 @@ fourByte: singles: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JGE finalize singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX + MOVBQZX (SI), R12 + ADDQ $1, SI IMULQ ·prime5v(SB), R12 XORQ R12, AX ROLQ $11, AX IMULQ R13, AX - CMPQ CX, BX + CMPQ SI, BX JL singlesLoop finalize: @@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40 MOVQ ·prime2v(SB), R14 // Load slice. - MOVQ b_base+8(FP), CX + MOVQ b_base+8(FP), SI MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX + LEAQ (SI)(DX*1), BX SUBQ $32, BX // Load vN from d. @@ -199,7 +199,7 @@ blockLoop: round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop // Copy vN back to d. @@ -208,8 +208,8 @@ blockLoop: MOVQ R10, 16(AX) MOVQ R11, 24(AX) - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 53bf76ef..376e0ca2 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -6,41 +6,52 @@ package xxhash import ( - "reflect" "unsafe" ) -// Notes: +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. 
+ +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: // -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) // -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. There is also a test (TestInlining) which verifies that these are +// inlined. // -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. +// See https://github.com/golang/go/issues/42739 for discussion. // Sum64String computes the 64-bit xxHash digest of s. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) return Sum64(b) } // WriteString adds more data to d. It always returns len(s), nil. // It may be faster than Write([]byte(s)) by avoiding a copy. func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. 
+type sliceHeader struct { + s string + cap int } diff --git a/vendor/github.com/dlmiddlecote/sqlstats/go.mod b/vendor/github.com/dlmiddlecote/sqlstats/go.mod deleted file mode 100644 index 5bf1dd33..00000000 --- a/vendor/github.com/dlmiddlecote/sqlstats/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/dlmiddlecote/sqlstats - -go 1.11 - -require ( - github.com/mattn/go-sqlite3 v1.14.6 - github.com/prometheus/client_golang v1.3.0 - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/dlmiddlecote/sqlstats/go.sum b/vendor/github.com/dlmiddlecote/sqlstats/go.sum deleted file mode 100644 index e9589935..00000000 --- a/vendor/github.com/dlmiddlecote/sqlstats/go.sum +++ /dev/null @@ -1,84 +0,0 @@ -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f h1:68K/z8GLUxV76xGSqwTWw2gyk/jwn79LUL43rES2g8o= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 00000000..a04f2907 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c30165..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... - -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS index 5ab5d41c..6cbabe5e 100644 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -4,35 +4,44 @@ # You can update this list using the following command: # -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' +# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS # Please keep the list sorted. 
Aaron L Adrien Bustany +Alexey Kazakov Amit Krishnan Anmol Sethi Bjørn Erik Pedersen +Brian Goff Bruno Bigras Caleb Spare Case Nelson -Chris Howey +Chris Howey Christoffer Buchholz Daniel Wagner-Hall Dave Cheney +Eric Lin Evan Phoenix Francisco Souza +Gautam Dey Hari haran -John C Barstow +Ichinose Shogo +Johannes Ebke +John C Barstow Kelvin Fo Ken-ichirou MATSUZAWA Matt Layher +Matthias Stone Nathan Youngman Nickolai Zeldovich +Oliver Bristow Patrick Paul Hammond Pawel Knap Pieter Droogendijk +Pratik Shinde Pursuit92 Riku Voipio Rob Figueiredo @@ -41,6 +50,7 @@ Slawek Ligus Soge Zhang Tiffany Jernigan Tilak Sharma +Tobias Klauser Tom Payne Travis Cline Tudor Golubenco diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index be4d7ea2..a438fe4b 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,28 @@ # Changelog -## v1.4.7 / 2018-01-09 +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) * Tests: Fix missing verb on format string (thanks @rchiossi) @@ -10,62 +32,62 @@ * Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) * Docs: replace references to OS X with macOS -## v1.4.2 / 2016-10-10 +## [1.4.2] - 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) -## v1.4.1 / 2016-10-04 +## [1.4.1] - 2016-10-04 * Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) -## v1.4.0 / 2016-10-01 +## [1.4.0] - 2016-10-01 * add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) -## v1.3.1 / 2016-06-28 +## [1.3.1] - 2016-06-28 * Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) -## v1.3.0 / 2016-04-19 +## [1.3.0] - 2016-04-19 * Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) -## v1.2.10 / 2016-03-02 +## [1.2.10] - 2016-03-02 * Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) -## v1.2.9 / 2016-01-13 
+## [1.2.9] - 2016-01-13 kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) -## v1.2.8 / 2015-12-17 +## [1.2.8] - 2015-12-17 * kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) * inotify: fix race in test * enable race detection for continuous integration (Linux, Mac, Windows) -## v1.2.5 / 2015-10-17 +## [1.2.5] - 2015-10-17 * inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) * inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) * kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) * kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) -## v1.2.1 / 2015-10-14 +## [1.2.1] - 2015-10-14 * kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) -## v1.2.0 / 2015-02-08 +## [1.2.0] - 2015-02-08 * inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) * inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) * kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) -## v1.1.1 / 2015-02-05 +## [1.1.1] - 2015-02-05 * inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) -## v1.1.0 / 2014-12-12 +## [1.1.0] - 2014-12-12 * kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) * add low-level functions @@ -77,22 +99,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v1.0.4 / 2014-09-07 +## [1.0.4] - 2014-09-07 * kqueue: add dragonfly to the build tags. * Rename source code files, rearrange code so exported APIs are at the top. * Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) -## v1.0.3 / 2014-08-19 +## [1.0.3] - 2014-08-19 * [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) -## v1.0.2 / 2014-08-17 +## [1.0.2] - 2014-08-17 * [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) * [Fix] Make ./path and path equivalent. (thanks @zhsso) -## v1.0.0 / 2014-08-15 +## [1.0.0] - 2014-08-15 * [API] Remove AddWatch on Windows, use Add. * Improve documentation for exported identifiers. 
[#30](https://github.com/fsnotify/fsnotify/issues/30) @@ -146,51 +168,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * no tests for the current implementation * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) -## v0.9.3 / 2014-12-31 +## [0.9.3] - 2014-12-31 * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v0.9.2 / 2014-08-17 +## [0.9.2] - 2014-08-17 * [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -## v0.9.1 / 2014-06-12 +## [0.9.1] - 2014-06-12 * Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) -## v0.9.0 / 2014-01-17 +## [0.9.0] - 2014-01-17 * IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) * [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) * [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. -## v0.8.12 / 2013-11-13 +## [0.8.12] - 2013-11-13 * [API] Remove FD_SET and friends from Linux adapter -## v0.8.11 / 2013-11-02 +## [0.8.11] - 2013-11-02 * [Doc] Add Changelog [#72][] (thanks @nathany) * [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) -## v0.8.10 / 2013-10-19 +## [0.8.10] - 2013-10-19 * [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) * [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) * [Doc] specify OS-specific limits in README (thanks @debrando) -## v0.8.9 / 2013-09-08 +## [0.8.9] - 2013-09-08 * [Doc] Contributing (thanks @nathany) * [Doc] update package path in example code [#63][] (thanks @paulhammond) * [Doc] GoCI badge in README (Linux only) [#60][] * [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) -## v0.8.8 / 2013-06-17 +## [0.8.8] - 2013-06-17 * [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) -## v0.8.7 / 2013-06-03 +## [0.8.7] - 2013-06-03 * [API] Make syscall flags internal * [Fix] inotify: ignore event changes @@ -198,74 +220,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] tests on Windows * lower case error messages -## v0.8.6 / 2013-05-23 +## [0.8.6] - 2013-05-23 * kqueue: Use EVT_ONLY flag on Darwin * [Doc] Update README with full example -## v0.8.5 / 2013-05-09 +## [0.8.5] - 2013-05-09 * [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) -## v0.8.4 / 2013-04-07 +## [0.8.4] - 2013-04-07 * [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) -## v0.8.3 / 2013-03-13 +## [0.8.3] - 2013-03-13 * [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) * [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) -## v0.8.2 / 2013-02-07 +## [0.8.2] - 2013-02-07 * [Doc] add Authors * [Fix] fix data races for map access [#29][] (thanks @fsouza) -## v0.8.1 / 2013-01-09 +## [0.8.1] - 2013-01-09 * [Fix] Windows path separators * [Doc] BSD License -## v0.8.0 / 2012-11-09 +## [0.8.0] - 2012-11-09 * kqueue: directory watching improvements (thanks @vmirage) * inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) * [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) -## v0.7.4 / 2012-10-09 +## [0.7.4] - 2012-10-09 * [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ 
(ugorji) * [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) * [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) * [Fix] kqueue: modify after recreation of file -## v0.7.3 / 2012-09-27 +## [0.7.3] - 2012-09-27 * [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) * [Fix] kqueue: no longer get duplicate CREATE events -## v0.7.2 / 2012-09-01 +## [0.7.2] - 2012-09-01 * kqueue: events for created directories -## v0.7.1 / 2012-07-14 +## [0.7.1] - 2012-07-14 * [Fix] for renaming files -## v0.7.0 / 2012-07-02 +## [0.7.0] - 2012-07-02 * [Feature] FSNotify flags * [Fix] inotify: Added file name back to event path -## v0.6.0 / 2012-06-06 +## [0.6.0] - 2012-06-06 * kqueue: watch files after directory created (thanks @tmc) -## v0.5.1 / 2012-05-22 +## [0.5.1] - 2012-05-22 * [Fix] inotify: remove all watches before Close() -## v0.5.0 / 2012-05-03 +## [0.5.0] - 2012-05-03 * [API] kqueue: return errors during watch instead of sending over channel * kqueue: match symlink behavior on Linux @@ -273,22 +295,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] kqueue: handle EINTR (reported by @robfig) * [Doc] Godoc example [#1][] (thanks @davecheney) -## v0.4.0 / 2012-03-30 +## [0.4.0] - 2012-03-30 * Go 1 released: build with go tool * [Feature] Windows support using winfsnotify * Windows does not have attribute change notifications * Roll attribute notifications into IsModify -## v0.3.0 / 2012-02-19 +## [0.3.0] - 2012-02-19 * kqueue: add files when watch directory -## v0.2.0 / 2011-12-30 +## [0.2.0] - 2011-12-30 * update to latest Go weekly code -## v0.1.0 / 2011-10-19 +## [0.1.0] - 2011-10-19 * kqueue: add watch on file creation to match inotify * kqueue: create file event diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index b2629e52..df57b1b2 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -12,9 +12,9 @@ Cross platform: Windows, Linux, BSD and macOS. 
| Adapter | OS | Status | | --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| inotify | Linux 2.6.27 or later, Android\* | Supported | +| kqueue | BSD, macOS, iOS\* | Supported | +| ReadDirectoryChangesW | Windows | Supported | | FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | | FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | | fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go index ced39cb8..b3ac3d8f 100644 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build solaris // +build solaris package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 89cab046..0f4ee52e 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !plan9 // +build !plan9 // Package fsnotify provides a platform-independent interface for file system notifications. diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod deleted file mode 100644 index ff11e13f..00000000 --- a/vendor/github.com/fsnotify/fsnotify/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/fsnotify/fsnotify - -go 1.13 - -require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum deleted file mode 100644 index f60af985..00000000 --- a/vendor/github.com/fsnotify/fsnotify/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go index d9fd1b88..eb87699b 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux // +build linux package fsnotify @@ -272,7 +273,7 @@ func (w *Watcher) readEvents() { if nameLen > 0 { // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] // The filename is padded with NULL bytes. TrimRight() gets rid of those. name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") } diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go index b33f2b4d..e9ff9439 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go index 86e76a3d..368f5b79 100644 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || openbsd || netbsd || dragonfly || darwin // +build freebsd openbsd netbsd dragonfly darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 2306c462..36cc3845 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 870c4d6d..98cd8476 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin // +build darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go index 09436f31..c02b75f7 100644 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build windows // +build windows package fsnotify diff --git a/vendor/github.com/getsentry/sentry-go/go.mod b/vendor/github.com/getsentry/sentry-go/go.mod deleted file mode 100644 index 300f22ff..00000000 --- a/vendor/github.com/getsentry/sentry-go/go.mod +++ /dev/null @@ -1,34 +0,0 @@ -module github.com/getsentry/sentry-go - -go 1.14 - -require ( - github.com/ajg/form v1.5.1 // indirect - github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect - github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 // indirect - github.com/gin-gonic/gin v1.4.0 - github.com/go-errors/errors v1.0.1 - github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab - github.com/google/go-cmp v0.5.5 - github.com/google/go-querystring v1.0.0 // indirect - github.com/imkira/go-interpol v1.1.0 // indirect - github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect - github.com/kataras/iris/v12 v12.1.8 - github.com/labstack/echo/v4 v4.1.11 - github.com/moul/http2curl v1.0.0 // indirect - github.com/onsi/ginkgo v1.10.3 // indirect - github.com/onsi/gomega v1.7.1 // indirect - github.com/pingcap/errors v0.11.4 - github.com/pkg/errors v0.8.1 - github.com/sergi/go-diff v1.0.0 // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect - github.com/smartystreets/goconvey v1.6.4 // indirect - github.com/ugorji/go v1.1.7 // indirect - github.com/urfave/negroni v1.0.0 - github.com/valyala/fasthttp v1.6.0 - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 // indirect - github.com/yudai/gojsondiff v1.0.0 // indirect - github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect - github.com/yudai/pp v2.0.1+incompatible // indirect -) diff --git a/vendor/github.com/getsentry/sentry-go/go.sum b/vendor/github.com/getsentry/sentry-go/go.sum deleted file mode 100644 index dcd1d2cf..00000000 --- a/vendor/github.com/getsentry/sentry-go/go.sum +++ /dev/null @@ -1,256 +0,0 @@ -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c= -github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= -github.com/CloudyKit/jet/v3 v3.0.0 h1:1PwO5w5VCtlUUl+KTOBsTGZlhjWkcybsGaAau52tOy8= -github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= -github.com/Joker/hpp v1.0.0 h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc= -github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398 h1:WDC6ySpJzbxGWFh4aMxFFC28wwGp5pEuoTtvA4q/qQ4= -github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= -github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible h1:Ppm0npCCsmuR9oQaBtRuZcmILVE74aXE+AmrJj8L2ns= 
-github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 h1:DddqAaWDpywytcG8w/qoQ5sAN8X12d3Z3koB0C3Rxsc= -github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8= -github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= -github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g= -github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= -github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ= -github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= 
-github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= -github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/iris-contrib/blackfriday v2.0.0+incompatible h1:o5sHQHHm0ToHUlAJSTjW9UWicjJSDDauOOQ2AHuIVp4= -github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= -github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= -github.com/iris-contrib/jade v1.1.3 h1:p7J/50I0cjo0wq/VWVCDFd8taPJbuFC+bq23SniRFX0= -github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= -github.com/iris-contrib/pongo2 v0.0.1 h1:zGP7pW51oi5eQZMIlGA3I+FHY9/HOQWDB+572yin0to= -github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= -github.com/iris-contrib/schema v0.0.1 h1:10g/WnoRR+U+XXHWKBHeNy/+tZmM2kcAVGLOsz+yaDA= -github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kataras/golog v0.0.10 h1:vRDRUmwacco/pmBAm8geLn8rHEdc+9Z4NAr5Sh7TG/4= -github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= -github.com/kataras/iris/v12 v12.1.8 h1:O3gJasjm7ZxpxwTH8tApZsvf274scSGQAUpNe47c37U= -github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= -github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= -github.com/kataras/pio v0.0.2 h1:6NAi+uPJ/Zuid6mrAKlgpbI11/zK/lV4B2rxWaJN98Y= -github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= -github.com/kataras/sitemap v0.0.5 h1:4HCONX5RLgVy6G4RkYOV3vKNcma9p236LdGOipJsaFE= -github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA= -github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/labstack/echo/v4 v4.1.11 h1:z0BZoArY4FqdpUEl+wlHp4hnr/oSR6MTmQmv8OHSoww= -github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= -github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= -github.com/microcosm-cc/bluemonday v1.0.2 h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= -github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk= -github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.6.0 h1:uWF8lgKmeaIewWVPwi4GRq2P6+R46IgYZdxWtM+GtEY= -github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= -github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= -github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= -github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod 
h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 h1:sKJQZMuxjOAR/Uo2LBfU90onWEf1dF4C+0hPJCc9Mpc= -golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= -gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= -gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho= -gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/gin-contrib/sse/go.mod b/vendor/github.com/gin-contrib/sse/go.mod deleted file mode 100644 index b9c03f47..00000000 --- a/vendor/github.com/gin-contrib/sse/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/gin-contrib/sse - -go 1.12 - -require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/gin-contrib/sse/go.sum b/vendor/github.com/gin-contrib/sse/go.sum deleted file mode 100644 index 4347755a..00000000 --- a/vendor/github.com/gin-contrib/sse/go.sum +++ /dev/null @@ -1,7 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/gin-gonic/gin/.travis.yml b/vendor/github.com/gin-gonic/gin/.travis.yml index 8ebae712..bcc21414 100644 --- a/vendor/github.com/gin-gonic/gin/.travis.yml +++ b/vendor/github.com/gin-gonic/gin/.travis.yml @@ -3,8 +3,6 @@ language: go matrix: fast_finish: true include: - - go: 1.12.x - 
env: GO111MODULE=on - go: 1.13.x - go: 1.13.x env: diff --git a/vendor/github.com/gin-gonic/gin/CHANGELOG.md b/vendor/github.com/gin-gonic/gin/CHANGELOG.md index a28edc84..4c806a5a 100644 --- a/vendor/github.com/gin-gonic/gin/CHANGELOG.md +++ b/vendor/github.com/gin-gonic/gin/CHANGELOG.md @@ -1,5 +1,41 @@ # Gin ChangeLog +## Gin v1.7.7 + +### BUGFIXES + +* Fixed X-Forwarded-For unsafe handling of CVE-2020-28483 [#2844](https://github.com/gin-gonic/gin/pull/2844), closed issue [#2862](https://github.com/gin-gonic/gin/issues/2862). +* Tree: updated the code logic for `latestNode` [#2897](https://github.com/gin-gonic/gin/pull/2897), closed issue [#2894](https://github.com/gin-gonic/gin/issues/2894) [#2878](https://github.com/gin-gonic/gin/issues/2878). +* Tree: fixed the misplacement of adding slashes [#2847](https://github.com/gin-gonic/gin/pull/2847), closed issue [#2843](https://github.com/gin-gonic/gin/issues/2843). +* Tree: fixed tsr with mixed static and wildcard paths [#2924](https://github.com/gin-gonic/gin/pull/2924), closed issue [#2918](https://github.com/gin-gonic/gin/issues/2918). + +### ENHANCEMENTS + +* TrustedProxies: make it backward-compatible [#2887](https://github.com/gin-gonic/gin/pull/2887), closed issue [#2819](https://github.com/gin-gonic/gin/issues/2819). +* TrustedPlatform: provide custom options for another CDN services [#2906](https://github.com/gin-gonic/gin/pull/2906). + +### DOCS + +* NoMethod: added usage annotation ([#2832](https://github.com/gin-gonic/gin/pull/2832#issuecomment-929954463)). + +## Gin v1.7.6 + +### BUGFIXES + +* bump new release to fix v1.7.5 release error by using v1.7.4 codes. + +## Gin v1.7.4 + +### BUGFIXES + +* bump new release to fix checksum mismatch + +## Gin v1.7.3 + +### BUGFIXES + +* fix level 1 router match [#2767](https://github.com/gin-gonic/gin/issues/2767), [#2796](https://github.com/gin-gonic/gin/issues/2796) + ## Gin v1.7.2 ### BUGFIXES diff --git a/vendor/github.com/gin-gonic/gin/README.md b/vendor/github.com/gin-gonic/gin/README.md index d4772d76..9bf459b0 100644 --- a/vendor/github.com/gin-gonic/gin/README.md +++ b/vendor/github.com/gin-gonic/gin/README.md @@ -77,6 +77,7 @@ Gin is a web framework written in Go (Golang). It features a martini-like API wi - [http2 server push](#http2-server-push) - [Define format for the log of routes](#define-format-for-the-log-of-routes) - [Set and get a cookie](#set-and-get-a-cookie) + - [Don't trust all proxies](#don't-trust-all-proxies) - [Testing](#testing) - [Users](#users) @@ -84,7 +85,7 @@ Gin is a web framework written in Go (Golang). It features a martini-like API wi To install Gin package, you need to install Go and set your Go workspace first. -1. The first need [Go](https://golang.org/) installed (**version 1.12+ is required**), then you can use the below Go command to install Gin. +1. The first need [Go](https://golang.org/) installed (**version 1.13+ is required**), then you can use the below Go command to install Gin. ```sh $ go get -u github.com/gin-gonic/gin @@ -2130,11 +2131,17 @@ Gin lets you specify which headers to hold the real client IP (if any), as well as specifying which proxies (or direct clients) you trust to specify one of these headers. 
-The `TrustedProxies` slice on your `gin.Engine` specifes network addresses or -network CIDRs from where clients which their request headers related to client +Use function `SetTrustedProxies()` on your `gin.Engine` to specify network addresses +or network CIDRs from where clients which their request headers related to client IP can be trusted. They can be IPv4 addresses, IPv4 CIDRs, IPv6 addresses or IPv6 CIDRs. +**Attention:** Gin trust all proxies by default if you don't specify a trusted +proxy using the function above, **this is NOT safe**. At the same time, if you don't +use any proxy, you can disable this feature by using `Engine.SetTrustedProxies(nil)`, +then `Context.ClientIP()` will return the remote address directly to avoid some +unnecessary computation. + ```go import ( "fmt" @@ -2145,7 +2152,7 @@ import ( func main() { router := gin.Default() - router.TrustedProxies = []string{"192.168.1.2"} + router.SetTrustedProxies([]string{"192.168.1.2"}) router.GET("/", func(c *gin.Context) { // If the client is 192.168.1.2, use the X-Forwarded-For @@ -2158,6 +2165,34 @@ func main() { } ``` +**Notice:** If you are using a CDN service, you can set the `Engine.TrustedPlatform` +to skip TrustedProxies check, it has a higher priority than TrustedProxies. +Look at the example below: +```go +import ( + "fmt" + + "github.com/gin-gonic/gin" +) + +func main() { + + router := gin.Default() + // Use predefined header gin.PlatformXXX + router.TrustedPlatform = gin.PlatformGoogleAppEngine + // Or set your own trusted request header for another trusted proxy service + // Don't set it to any suspect request header, it's unsafe + router.TrustedPlatform = "X-CDN-IP" + + router.GET("/", func(c *gin.Context) { + // If you set TrustedPlatform, ClientIP() will resolve the + // corresponding header and return IP directly + fmt.Printf("ClientIP: %s\n", c.ClientIP()) + }) + router.Run() +} +``` + ## Testing The `net/http/httptest` package is preferable way for HTTP testing. diff --git a/vendor/github.com/gin-gonic/gin/context.go b/vendor/github.com/gin-gonic/gin/context.go index dc03c358..220d1bc7 100644 --- a/vendor/github.com/gin-gonic/gin/context.go +++ b/vendor/github.com/gin-gonic/gin/context.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "io/ioutil" + "log" "math" "mime/multipart" "net" @@ -53,8 +54,9 @@ type Context struct { index int8 fullPath string - engine *Engine - params *Params + engine *Engine + params *Params + skippedNodes *[]skippedNode // This mutex protect Keys map mu sync.RWMutex @@ -96,7 +98,8 @@ func (c *Context) reset() { c.Accepted = nil c.queryCache = nil c.formCache = nil - *c.params = (*c.params)[0:0] + *c.params = (*c.params)[:0] + *c.skippedNodes = (*c.skippedNodes)[:0] } // Copy returns a copy of the current context that can be safely used outside the request's scope. @@ -725,13 +728,23 @@ func (c *Context) ShouldBindBodyWith(obj interface{}, bb binding.BindingBody) (e return bb.BindBody(body, obj) } -// ClientIP implements a best effort algorithm to return the real client IP. +// ClientIP implements one best effort algorithm to return the real client IP. // It called c.RemoteIP() under the hood, to check if the remote IP is a trusted proxy or not. -// If it's it will then try to parse the headers defined in Engine.RemoteIPHeaders (defaulting to [X-Forwarded-For, X-Real-Ip]). 
-// If the headers are nots syntactically valid OR the remote IP does not correspong to a trusted proxy, +// If it is it will then try to parse the headers defined in Engine.RemoteIPHeaders (defaulting to [X-Forwarded-For, X-Real-Ip]). +// If the headers are not syntactically valid OR the remote IP does not correspond to a trusted proxy, // the remote IP (coming form Request.RemoteAddr) is returned. func (c *Context) ClientIP() string { + // Check if we're running on a trusted platform, continue running backwards if error + if c.engine.TrustedPlatform != "" { + // Developers can define their own header of Trusted Platform or use predefined constants + if addr := c.requestHeader(c.engine.TrustedPlatform); addr != "" { + return addr + } + } + + // Legacy "AppEngine" flag if c.engine.AppEngine { + log.Println(`The AppEngine flag is going to be deprecated. Please check issues #2723 and #2739 and use 'TrustedPlatform: gin.PlatformGoogleAppEngine' instead.`) if addr := c.requestHeader("X-Appengine-Remote-Addr"); addr != "" { return addr } @@ -744,7 +757,7 @@ func (c *Context) ClientIP() string { if trusted && c.engine.ForwardedByClientIP && c.engine.RemoteIPHeaders != nil { for _, headerName := range c.engine.RemoteIPHeaders { - ip, valid := validateHeader(c.requestHeader(headerName)) + ip, valid := c.engine.validateHeader(c.requestHeader(headerName)) if valid { return ip } @@ -753,10 +766,21 @@ func (c *Context) ClientIP() string { return remoteIP.String() } +func (e *Engine) isTrustedProxy(ip net.IP) bool { + if e.trustedCIDRs != nil { + for _, cidr := range e.trustedCIDRs { + if cidr.Contains(ip) { + return true + } + } + } + return false +} + // RemoteIP parses the IP from Request.RemoteAddr, normalizes and returns the IP (without the port). // It also checks if the remoteIP is a trusted proxy or not. 
// In order to perform this validation, it will see if the IP is contained within at least one of the CIDR blocks -// defined in Engine.TrustedProxies +// defined by Engine.SetTrustedProxies() func (c *Context) RemoteIP() (net.IP, bool) { ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)) if err != nil { @@ -767,35 +791,25 @@ func (c *Context) RemoteIP() (net.IP, bool) { return nil, false } - if c.engine.trustedCIDRs != nil { - for _, cidr := range c.engine.trustedCIDRs { - if cidr.Contains(remoteIP) { - return remoteIP, true - } - } - } - - return remoteIP, false + return remoteIP, c.engine.isTrustedProxy(remoteIP) } -func validateHeader(header string) (clientIP string, valid bool) { +func (e *Engine) validateHeader(header string) (clientIP string, valid bool) { if header == "" { return "", false } items := strings.Split(header, ",") - for i, ipStr := range items { - ipStr = strings.TrimSpace(ipStr) + for i := len(items) - 1; i >= 0; i-- { + ipStr := strings.TrimSpace(items[i]) ip := net.ParseIP(ipStr) if ip == nil { return "", false } - // We need to return the first IP in the list, but, - // we should not early return since we need to validate that - // the rest of the header is syntactically valid - if i == 0 { - clientIP = ipStr - valid = true + // X-Forwarded-For is appended by proxy + // Check IPs in reverse order and stop when find untrusted proxy + if (i == 0) || (!e.isTrustedProxy(ip)) { + return ipStr, true } } return diff --git a/vendor/github.com/gin-gonic/gin/context_appengine.go b/vendor/github.com/gin-gonic/gin/context_appengine.go index d5658434..8bf93896 100644 --- a/vendor/github.com/gin-gonic/gin/context_appengine.go +++ b/vendor/github.com/gin-gonic/gin/context_appengine.go @@ -8,5 +8,5 @@ package gin func init() { - defaultAppEngine = true + defaultPlatform = PlatformGoogleAppEngine } diff --git a/vendor/github.com/gin-gonic/gin/debug.go b/vendor/github.com/gin-gonic/gin/debug.go index 4c7cd0c3..9bacc685 100644 --- a/vendor/github.com/gin-gonic/gin/debug.go +++ b/vendor/github.com/gin-gonic/gin/debug.go @@ -12,7 +12,7 @@ import ( "strings" ) -const ginSupportMinGoVer = 12 +const ginSupportMinGoVer = 13 // IsDebugging returns true if the framework is running in debug mode. // Use SetMode(gin.ReleaseMode) to disable debug mode. @@ -67,7 +67,7 @@ func getMinVer(v string) (uint64, error) { func debugPrintWARNINGDefault() { if v, e := getMinVer(runtime.Version()); e == nil && v <= ginSupportMinGoVer { - debugPrint(`[WARNING] Now Gin requires Go 1.12+. + debugPrint(`[WARNING] Now Gin requires Go 1.13+. `) } diff --git a/vendor/github.com/gin-gonic/gin/gin.go b/vendor/github.com/gin-gonic/gin/gin.go index 03a0e127..58e76f41 100644 --- a/vendor/github.com/gin-gonic/gin/gin.go +++ b/vendor/github.com/gin-gonic/gin/gin.go @@ -11,6 +11,7 @@ import ( "net/http" "os" "path" + "reflect" "strings" "sync" @@ -25,7 +26,9 @@ var ( default405Body = []byte("405 method not allowed") ) -var defaultAppEngine bool +var defaultPlatform string + +var defaultTrustedCIDRs = []*net.IPNet{{IP: net.IP{0x0, 0x0, 0x0, 0x0}, Mask: net.IPMask{0x0, 0x0, 0x0, 0x0}}} // 0.0.0.0/0 // HandlerFunc defines the handler used by gin middleware as return value. type HandlerFunc func(*Context) @@ -52,6 +55,16 @@ type RouteInfo struct { // RoutesInfo defines a RouteInfo array. type RoutesInfo []RouteInfo +// Trusted platforms +const ( + // When running on Google App Engine. 
Trust X-Appengine-Remote-Addr + // for determining the client's IP + PlatformGoogleAppEngine = "X-Appengine-Remote-Addr" + // When using Cloudflare's CDN. Trust CF-Connecting-IP for determining + // the client's IP + PlatformCloudflare = "CF-Connecting-IP" +) + // Engine is the framework's instance, it contains the muxer, middleware and configuration settings. // Create an instance of Engine, by using New() or Default() type Engine struct { @@ -89,18 +102,7 @@ type Engine struct { // `(*gin.Context).Request.RemoteAddr`. ForwardedByClientIP bool - // List of headers used to obtain the client IP when - // `(*gin.Engine).ForwardedByClientIP` is `true` and - // `(*gin.Context).Request.RemoteAddr` is matched by at least one of the - // network origins of `(*gin.Engine).TrustedProxies`. - RemoteIPHeaders []string - - // List of network origins (IPv4 addresses, IPv4 CIDRs, IPv6 addresses or - // IPv6 CIDRs) from which to trust request's headers that contain - // alternative client IP when `(*gin.Engine).ForwardedByClientIP` is - // `true`. - TrustedProxies []string - + // DEPRECATED: USE `TrustedPlatform` WITH VALUE `gin.GoogleAppEngine` INSTEAD // #726 #755 If enabled, it will trust some headers starting with // 'X-AppEngine...' for better integration with that PaaS. AppEngine bool @@ -113,14 +115,24 @@ type Engine struct { // as url.Path gonna be used, which is already unescaped. UnescapePathValues bool - // Value of 'maxMemory' param that is given to http.Request's ParseMultipartForm - // method call. - MaxMultipartMemory int64 - // RemoveExtraSlash a parameter can be parsed from the URL even with extra slashes. // See the PR #1817 and issue #1644 RemoveExtraSlash bool + // List of headers used to obtain the client IP when + // `(*gin.Engine).ForwardedByClientIP` is `true` and + // `(*gin.Context).Request.RemoteAddr` is matched by at least one of the + // network origins of list defined by `(*gin.Engine).SetTrustedProxies()`. + RemoteIPHeaders []string + + // If set to a constant of value gin.Platform*, trusts the headers set by + // that platform, for example to determine the client IP + TrustedPlatform string + + // Value of 'maxMemory' param that is given to http.Request's ParseMultipartForm + // method call. + MaxMultipartMemory int64 + delims render.Delims secureJSONPrefix string HTMLRender render.HTMLRender @@ -132,6 +144,8 @@ type Engine struct { pool sync.Pool trees methodTrees maxParams uint16 + maxSections uint16 + trustedProxies []string trustedCIDRs []*net.IPNet } @@ -159,8 +173,7 @@ func New() *Engine { HandleMethodNotAllowed: false, ForwardedByClientIP: true, RemoteIPHeaders: []string{"X-Forwarded-For", "X-Real-IP"}, - TrustedProxies: []string{"0.0.0.0/0"}, - AppEngine: defaultAppEngine, + TrustedPlatform: defaultPlatform, UseRawPath: false, RemoveExtraSlash: false, UnescapePathValues: true, @@ -168,6 +181,8 @@ func New() *Engine { trees: make(methodTrees, 0, 9), delims: render.Delims{Left: "{{", Right: "}}"}, secureJSONPrefix: "while(1);", + trustedProxies: []string{"0.0.0.0/0"}, + trustedCIDRs: defaultTrustedCIDRs, } engine.RouterGroup.engine = engine engine.pool.New = func() interface{} { @@ -186,7 +201,8 @@ func Default() *Engine { func (engine *Engine) allocateContext() *Context { v := make(Params, 0, engine.maxParams) - return &Context{engine: engine, params: &v} + skippedNodes := make([]skippedNode, 0, engine.maxSections) + return &Context{engine: engine, params: &v, skippedNodes: &skippedNodes} } // Delims sets template left and right delims and returns a Engine instance. 
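The reworked `validateHeader` a few hunks above now walks `X-Forwarded-For` from right to left and stops at the first hop that is not a trusted proxy, instead of taking the left-most entry unconditionally. A minimal standalone sketch of that walk, using only the standard library rather than the vendored function:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// clientFromForwardedFor mimics the reverse walk: the right-most entries are
// the proxies closest to us; the first untrusted hop is reported as the client.
func clientFromForwardedFor(header string, trusted []*net.IPNet) (string, bool) {
	items := strings.Split(header, ",")
	for i := len(items) - 1; i >= 0; i-- {
		ipStr := strings.TrimSpace(items[i])
		ip := net.ParseIP(ipStr)
		if ip == nil {
			return "", false // any malformed entry invalidates the whole header
		}
		inTrusted := false
		for _, cidr := range trusted {
			if cidr.Contains(ip) {
				inTrusted = true
				break
			}
		}
		if i == 0 || !inTrusted {
			return ipStr, true
		}
	}
	return "", false
}

func main() {
	_, lan, _ := net.ParseCIDR("192.168.1.0/24")
	ip, ok := clientFromForwardedFor("10.0.0.1, 203.0.113.7, 192.168.1.2", []*net.IPNet{lan})
	fmt.Println(ip, ok) // 203.0.113.7 true: the hop just outside the trusted range
}
```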
@@ -249,7 +265,7 @@ func (engine *Engine) NoRoute(handlers ...HandlerFunc) { engine.rebuild404Handlers() } -// NoMethod sets the handlers called when... TODO. +// NoMethod sets the handlers called when Engine.HandleMethodNotAllowed = true. func (engine *Engine) NoMethod(handlers ...HandlerFunc) { engine.noMethod = handlers engine.rebuild405Handlers() @@ -292,6 +308,10 @@ func (engine *Engine) addRoute(method, path string, handlers HandlersChain) { if paramsCount := countParams(path); paramsCount > engine.maxParams { engine.maxParams = paramsCount } + + if sectionsCount := countSections(path); sectionsCount > engine.maxSections { + engine.maxSections = sectionsCount + } } // Routes returns a slice of registered routes, including some useful information, such as: @@ -326,11 +346,11 @@ func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo { func (engine *Engine) Run(addr ...string) (err error) { defer func() { debugPrintError(err) }() - trustedCIDRs, err := engine.prepareTrustedCIDRs() - if err != nil { - return err + if engine.isUnsafeTrustedProxies() { + debugPrint("[WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.\n" + + "Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.") } - engine.trustedCIDRs = trustedCIDRs + address := resolveAddress(addr) debugPrint("Listening and serving HTTP on %s\n", address) err = http.ListenAndServe(address, engine) @@ -338,12 +358,12 @@ func (engine *Engine) Run(addr ...string) (err error) { } func (engine *Engine) prepareTrustedCIDRs() ([]*net.IPNet, error) { - if engine.TrustedProxies == nil { + if engine.trustedProxies == nil { return nil, nil } - cidr := make([]*net.IPNet, 0, len(engine.TrustedProxies)) - for _, trustedProxy := range engine.TrustedProxies { + cidr := make([]*net.IPNet, 0, len(engine.trustedProxies)) + for _, trustedProxy := range engine.trustedProxies { if !strings.Contains(trustedProxy, "/") { ip := parseIP(trustedProxy) if ip == nil { @@ -366,6 +386,31 @@ func (engine *Engine) prepareTrustedCIDRs() ([]*net.IPNet, error) { return cidr, nil } +// SetTrustedProxies set a list of network origins (IPv4 addresses, +// IPv4 CIDRs, IPv6 addresses or IPv6 CIDRs) from which to trust +// request's headers that contain alternative client IP when +// `(*gin.Engine).ForwardedByClientIP` is `true`. `TrustedProxies` +// feature is enabled by default, and it also trusts all proxies +// by default. If you want to disable this feature, use +// Engine.SetTrustedProxies(nil), then Context.ClientIP() will +// return the remote address directly. +func (engine *Engine) SetTrustedProxies(trustedProxies []string) error { + engine.trustedProxies = trustedProxies + return engine.parseTrustedProxies() +} + +// isUnsafeTrustedProxies compares Engine.trustedCIDRs and defaultTrustedCIDRs, it's not safe if equal (returns true) +func (engine *Engine) isUnsafeTrustedProxies() bool { + return reflect.DeepEqual(engine.trustedCIDRs, defaultTrustedCIDRs) +} + +// parseTrustedProxies parse Engine.trustedProxies to Engine.trustedCIDRs +func (engine *Engine) parseTrustedProxies() error { + trustedCIDRs, err := engine.prepareTrustedCIDRs() + engine.trustedCIDRs = trustedCIDRs + return err +} + // parseIP parse a string representation of an IP and returns a net.IP with the // minimum byte representation or nil if input is invalid. 
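For completeness, a short usage sketch of the `SetTrustedProxies` API documented above: the call validates its input and returns an error, and passing `nil` disables the feature so `ClientIP()` falls straight back to the remote address.

```go
package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.Default()

	// Trust only the local reverse proxy and an internal range;
	// malformed addresses or CIDRs surface as an error here.
	if err := r.SetTrustedProxies([]string{"127.0.0.1", "10.0.0.0/8"}); err != nil {
		panic(err)
	}

	// Or, when nothing proxies the service, disable the feature entirely:
	// _ = r.SetTrustedProxies(nil)

	r.Run(":8080")
}
```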
func parseIP(ip string) net.IP { @@ -387,6 +432,11 @@ func (engine *Engine) RunTLS(addr, certFile, keyFile string) (err error) { debugPrint("Listening and serving HTTPS on %s\n", addr) defer func() { debugPrintError(err) }() + if engine.isUnsafeTrustedProxies() { + debugPrint("[WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.\n" + + "Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.") + } + err = http.ListenAndServeTLS(addr, certFile, keyFile, engine) return } @@ -398,6 +448,11 @@ func (engine *Engine) RunUnix(file string) (err error) { debugPrint("Listening and serving HTTP on unix:/%s", file) defer func() { debugPrintError(err) }() + if engine.isUnsafeTrustedProxies() { + debugPrint("[WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.\n" + + "Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.") + } + listener, err := net.Listen("unix", file) if err != nil { return @@ -416,6 +471,11 @@ func (engine *Engine) RunFd(fd int) (err error) { debugPrint("Listening and serving HTTP on fd@%d", fd) defer func() { debugPrintError(err) }() + if engine.isUnsafeTrustedProxies() { + debugPrint("[WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.\n" + + "Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.") + } + f := os.NewFile(uintptr(fd), fmt.Sprintf("fd@%d", fd)) listener, err := net.FileListener(f) if err != nil { @@ -431,6 +491,12 @@ func (engine *Engine) RunFd(fd int) (err error) { func (engine *Engine) RunListener(listener net.Listener) (err error) { debugPrint("Listening and serving HTTP on listener what's bind with address@%s", listener.Addr()) defer func() { debugPrintError(err) }() + + if engine.isUnsafeTrustedProxies() { + debugPrint("[WARNING] You trusted all proxies, this is NOT safe. 
We recommend you to set a value.\n" + + "Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.") + } + err = http.Serve(listener, engine) return } @@ -479,7 +545,7 @@ func (engine *Engine) handleHTTPRequest(c *Context) { } root := t[i].root // Find route in tree - value := root.getValue(rPath, c.params, unescape) + value := root.getValue(rPath, c.params, c.skippedNodes, unescape) if value.params != nil { c.Params = *value.params } @@ -507,7 +573,7 @@ func (engine *Engine) handleHTTPRequest(c *Context) { if tree.method == httpMethod { continue } - if value := tree.root.getValue(rPath, nil, unescape); value.handlers != nil { + if value := tree.root.getValue(rPath, nil, c.skippedNodes, unescape); value.handlers != nil { c.handlers = engine.allNoMethod serveError(c, http.StatusMethodNotAllowed, default405Body) return diff --git a/vendor/github.com/gin-gonic/gin/go.mod b/vendor/github.com/gin-gonic/gin/go.mod deleted file mode 100644 index 884ff851..00000000 --- a/vendor/github.com/gin-gonic/gin/go.mod +++ /dev/null @@ -1,14 +0,0 @@ -module github.com/gin-gonic/gin - -go 1.13 - -require ( - github.com/gin-contrib/sse v0.1.0 - github.com/go-playground/validator/v10 v10.4.1 - github.com/golang/protobuf v1.3.3 - github.com/json-iterator/go v1.1.9 - github.com/mattn/go-isatty v0.0.12 - github.com/stretchr/testify v1.4.0 - github.com/ugorji/go/codec v1.1.7 - gopkg.in/yaml.v2 v2.2.8 -) diff --git a/vendor/github.com/gin-gonic/gin/go.sum b/vendor/github.com/gin-gonic/gin/go.sum deleted file mode 100644 index a64b3319..00000000 --- a/vendor/github.com/gin-gonic/gin/go.sum +++ /dev/null @@ -1,52 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/mattn/go-isatty v0.0.12 
h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gin-gonic/gin/tree.go b/vendor/github.com/gin-gonic/gin/tree.go index 0d082d05..158a3390 100644 --- a/vendor/github.com/gin-gonic/gin/tree.go +++ b/vendor/github.com/gin-gonic/gin/tree.go @@ -17,6 +17,7 @@ import ( var ( strColon = []byte(":") strStar = []byte("*") + strSlash = []byte("/") ) // Param is a single URL parameter, consisting of a key and a value. 
@@ -98,6 +99,11 @@ func countParams(path string) uint16 { return n } +func countSections(path string) uint16 { + s := bytesconv.StringToBytes(path) + return uint16(bytes.Count(s, strSlash)) +} + type nodeType uint8 const ( @@ -118,11 +124,6 @@ type node struct { fullPath string } -type skip struct { - path string - paramNode *node -} - // Increments priority of the given child and reorders if necessary func (n *node) incrementChildPrio(pos int) int { cs := n.children @@ -399,13 +400,19 @@ type nodeValue struct { fullPath string } +type skippedNode struct { + path string + node *node + paramsCount int16 +} + // Returns the handle registered with the given path (key). The values of // wildcards are saved to a map. // If no handle can be found, a TSR (trailing slash redirect) recommendation is // made if a handle exists with an extra (without the) trailing slash for the // given path. -func (n *node) getValue(path string, params *Params, unescape bool) (value nodeValue) { - var skipped *skip +func (n *node) getValue(path string, params *Params, skippedNodes *[]skippedNode, unescape bool) (value nodeValue) { + var globalParamsCount int16 walk: // Outer loop for walking the tree for { @@ -418,10 +425,13 @@ walk: // Outer loop for walking the tree idxc := path[0] for i, c := range []byte(n.indices) { if c == idxc { - if strings.HasPrefix(n.children[len(n.children)-1].path, ":") { - skipped = &skip{ + // strings.HasPrefix(n.children[len(n.children)-1].path, ":") == n.wildChild + if n.wildChild { + index := len(*skippedNodes) + *skippedNodes = (*skippedNodes)[:index+1] + (*skippedNodes)[index] = skippedNode{ path: prefix + path, - paramNode: &node{ + node: &node{ path: n.path, wildChild: n.wildChild, nType: n.nType, @@ -430,6 +440,7 @@ walk: // Outer loop for walking the tree handlers: n.handlers, fullPath: n.fullPath, }, + paramsCount: globalParamsCount, } } @@ -438,20 +449,41 @@ walk: // Outer loop for walking the tree } } - // If there is no wildcard pattern, recommend a redirection if !n.wildChild { + // If the path at the end of the loop is not equal to '/' and the current node has no child nodes + // the current node needs to roll back to last vaild skippedNode + if path != "/" { + for l := len(*skippedNodes); l > 0; { + skippedNode := (*skippedNodes)[l-1] + *skippedNodes = (*skippedNodes)[:l-1] + if strings.HasSuffix(skippedNode.path, path) { + path = skippedNode.path + n = skippedNode.node + if value.params != nil { + *value.params = (*value.params)[:skippedNode.paramsCount] + } + globalParamsCount = skippedNode.paramsCount + continue walk + } + } + } + // Nothing found. // We can recommend to redirect to the same URL without a // trailing slash if a leaf exists for that path. - value.tsr = (path == "/" && n.handlers != nil) + value.tsr = path == "/" && n.handlers != nil return } // Handle wildcard child, which is always at the end of the array n = n.children[len(n.children)-1] + globalParamsCount++ switch n.nType { case param: + // fix truncate the parameter + // tree_test.go line: 204 + // Find param end (either '/' or path end) end := 0 for end < len(path) && path[end] != '/' { @@ -459,7 +491,7 @@ walk: // Outer loop for walking the tree } // Save param value - if params != nil { + if params != nil && cap(*params) > 0 { if value.params == nil { value.params = params } @@ -487,7 +519,7 @@ walk: // Outer loop for walking the tree } // ... 
but we can't - value.tsr = (len(path) == end+1) + value.tsr = len(path) == end+1 return } @@ -499,7 +531,7 @@ walk: // Outer loop for walking the tree // No handle found. Check if a handle for this path + a // trailing slash exists for TSR recommendation n = n.children[0] - value.tsr = (n.path == "/" && n.handlers != nil) + value.tsr = n.path == "/" && n.handlers != nil } return @@ -535,6 +567,24 @@ walk: // Outer loop for walking the tree } if path == prefix { + // If the current path does not equal '/' and the node does not have a registered handle and the most recently matched node has a child node + // the current node needs to roll back to last vaild skippedNode + if n.handlers == nil && path != "/" { + for l := len(*skippedNodes); l > 0; { + skippedNode := (*skippedNodes)[l-1] + *skippedNodes = (*skippedNodes)[:l-1] + if strings.HasSuffix(skippedNode.path, path) { + path = skippedNode.path + n = skippedNode.node + if value.params != nil { + *value.params = (*value.params)[:skippedNode.paramsCount] + } + globalParamsCount = skippedNode.paramsCount + continue walk + } + } + // n = latestNode.children[len(latestNode.children)-1] + } // We should have reached the node containing the handle. // Check if this node has a handle registered. if value.handlers = n.handlers; value.handlers != nil { @@ -564,18 +614,29 @@ walk: // Outer loop for walking the tree return } - if path != "/" && skipped != nil && strings.HasSuffix(skipped.path, path) { - path = skipped.path - n = skipped.paramNode - skipped = nil - continue walk - } - // Nothing found. We can recommend to redirect to the same URL with an // extra trailing slash if a leaf exists for that path - value.tsr = (path == "/") || + value.tsr = path == "/" || (len(prefix) == len(path)+1 && prefix[len(path)] == '/' && path == prefix[:len(prefix)-1] && n.handlers != nil) + + // roll back to last valid skippedNode + if !value.tsr && path != "/" { + for l := len(*skippedNodes); l > 0; { + skippedNode := (*skippedNodes)[l-1] + *skippedNodes = (*skippedNodes)[:l-1] + if strings.HasSuffix(skippedNode.path, path) { + path = skippedNode.path + n = skippedNode.node + if value.params != nil { + *value.params = (*value.params)[:skippedNode.paramsCount] + } + globalParamsCount = skippedNode.paramsCount + continue walk + } + } + } + return } } diff --git a/vendor/github.com/gin-gonic/gin/version.go b/vendor/github.com/gin-gonic/gin/version.go index a80ab69a..4b69b9b9 100644 --- a/vendor/github.com/gin-gonic/gin/version.go +++ b/vendor/github.com/gin-gonic/gin/version.go @@ -5,4 +5,4 @@ package gin // Version is the current gin framework's version. 
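The `skippedNode` bookkeeping above lets the matcher back out of a static child that only partially matches and retry the wildcard sibling. A hypothetical route layout that exercises this fallback (the paths are illustrative, not taken from the diff):

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()

	// A static child and a param child share the "/user/" prefix.
	r.GET("/user/new", func(c *gin.Context) {
		c.String(http.StatusOK, "signup form")
	})
	r.GET("/user/:id", func(c *gin.Context) {
		// A request for /user/news first descends into the static "new"
		// child, fails to match completely, and rolls back to :id.
		c.String(http.StatusOK, "profile of %s", c.Param("id"))
	})

	r.Run(":8080")
}
```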
-const Version = "v1.7.2" +const Version = "v1.7.7" diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod deleted file mode 100644 index 3e45e225..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/go-openapi/jsonpointer - -require ( - github.com/go-openapi/swag v0.19.5 - github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect - github.com/stretchr/testify v1.3.0 -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum deleted file mode 100644 index 953d4f35..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/go.sum +++ /dev/null @@ -1,24 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod deleted file mode 100644 index e6c2ec4d..00000000 --- a/vendor/github.com/go-openapi/jsonreference/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/go-openapi/jsonreference - -require ( - github.com/PuerkitoBio/purell v1.1.1 - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/go-openapi/jsonpointer v0.19.3 - github.com/stretchr/testify v1.3.0 - golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect - 
golang.org/x/text v0.3.3 // indirect -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum deleted file mode 100644 index b37f873e..00000000 --- a/vendor/github.com/go-openapi/jsonreference/go.sum +++ /dev/null @@ -1,38 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml index e8ceca44..2281a07b 100644 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ b/vendor/github.com/go-openapi/spec/.travis.yml @@ -1,7 +1,7 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.14.x +- 1.16.x - 1.x arch: - amd64 diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod deleted file mode 100644 index 63d9e82e..00000000 --- a/vendor/github.com/go-openapi/spec/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/go-openapi/spec - -require ( - github.com/go-openapi/jsonpointer v0.19.5 - github.com/go-openapi/jsonreference v0.19.5 - github.com/go-openapi/swag v0.19.14 - github.com/stretchr/testify v1.6.1 - golang.org/x/net v0.0.0-20210119194325-5f4716e94777 // indirect - golang.org/x/text v0.3.5 // indirect - gopkg.in/yaml.v2 v2.4.0 -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum deleted file mode 100644 index 9e86ed98..00000000 --- a/vendor/github.com/go-openapi/spec/go.sum +++ /dev/null @@ -1,65 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text 
v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index 0059b99a..b81175af 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -168,7 +168,14 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) normalized := normalizeBase(pth) debugLog("loading doc from: %s", normalized) - data, fromCache := r.cache.Get(normalized) + unescaped, err := url.PathUnescape(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + u := url.URL{Path: unescaped} + + data, fromCache := r.cache.Get(u.RequestURI()) if fromCache { return data, toFetch, fromCache, nil } diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod deleted file mode 100644 index fb29b65b..00000000 --- a/vendor/github.com/go-openapi/swag/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module github.com/go-openapi/swag - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/mailru/easyjson v0.7.6 - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - github.com/stretchr/testify v1.6.1 - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect - gopkg.in/yaml.v2 v2.4.0 - gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect -) - -replace github.com/golang/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422 - -replace sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1 - -go 1.11 diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum deleted file mode 100644 index a45da809..00000000 --- a/vendor/github.com/go-openapi/swag/go.sum +++ /dev/null @@ -1,29 +0,0 @@ -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e 
h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md index ba1b0680..5b0694fd 100644 --- a/vendor/github.com/go-playground/locales/README.md +++ b/vendor/github.com/go-playground/locales/README.md @@ -1,5 +1,5 @@ ## locales -![Project status](https://img.shields.io/badge/version-0.13.0-green.svg) +![Project status](https://img.shields.io/badge/version-0.14.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) [![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) @@ -11,7 +11,7 @@ an i18n package; these were built for use with, but not exclusive to, [Universal Features -------- -- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1 +- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1 - [x] Contains Cardinal, Ordinal and Range Plural Rules - [x] Contains Month, Weekday and Timezone translations built in - [x] Contains Date & Time formatting functions diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go index cdaba596..b5a95fb0 100644 --- a/vendor/github.com/go-playground/locales/currency/currency.go +++ b/vendor/github.com/go-playground/locales/currency/currency.go @@ -176,6 +176,7 @@ const ( MNT MOP MRO + MRU MTL MTP MUR @@ -262,9 +263,11 @@ const ( UYI UYP UYU + UYW UZS VEB VEF + VES VND VNN VUV diff --git a/vendor/github.com/go-playground/locales/go.mod b/vendor/github.com/go-playground/locales/go.mod deleted file mode 100644 index 34ab6f23..00000000 --- a/vendor/github.com/go-playground/locales/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/go-playground/locales - -go 1.13 - -require 
golang.org/x/text v0.3.2 diff --git a/vendor/github.com/go-playground/locales/go.sum b/vendor/github.com/go-playground/locales/go.sum deleted file mode 100644 index 63c9200f..00000000 --- a/vendor/github.com/go-playground/locales/go.sum +++ /dev/null @@ -1,3 +0,0 @@ -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/go-playground/universal-translator/Makefile b/vendor/github.com/go-playground/universal-translator/Makefile new file mode 100644 index 00000000..ec3455bd --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/Makefile @@ -0,0 +1,18 @@ +GOCMD=GO111MODULE=on go + +linters-install: + @golangci-lint --version >/dev/null 2>&1 || { \ + echo "installing linting tools..."; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \ + } + +lint: linters-install + golangci-lint run + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -bench=. -benchmem ./... + +.PHONY: test lint linters-install \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md index 071f33ab..46dec6d2 100644 --- a/vendor/github.com/go-playground/universal-translator/README.md +++ b/vendor/github.com/go-playground/universal-translator/README.md @@ -1,5 +1,5 @@ ## universal-translator -![Project status](https://img.shields.io/badge/version-0.17.0-green.svg) +![Project status](https://img.shields.io/badge/version-0.18.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator) [![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) @@ -18,7 +18,7 @@ use in your applications. Features -------- -- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3 +- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1 - [x] Contains Cardinal, Ordinal and Range Plural Rules - [x] Contains Month, Weekday and Timezone translations built in - [x] Contains Date & Time formatting functions @@ -51,7 +51,7 @@ Please see https://godoc.org/github.com/go-playground/universal-translator for u File formatting -------------- -All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained withing the same file(s); +All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained within the same file(s); they are only separated for easy viewing. 
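Back in the `locales` bump a few files above, the CLDR v36 data introduces new currency codes (for example `MRU`, `UYW` and `VES`). A small sketch of how those constants are typically consumed, assuming the translator's `FmtCurrency` helper keeps its usual `(value, decimals, currency)` shape:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales/currency"
	"github.com/go-playground/locales/en"
)

func main() {
	l := en.New()

	// Format an amount in one of the newly added ISO 4217 currencies.
	fmt.Println(l.FmtCurrency(1234.5, 2, currency.VES))
	fmt.Println(l.FmtCurrency(1234.5, 2, currency.USD)) // $1,234.50
}
```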
##### Examples: diff --git a/vendor/github.com/go-playground/universal-translator/go.mod b/vendor/github.com/go-playground/universal-translator/go.mod deleted file mode 100644 index 8079590f..00000000 --- a/vendor/github.com/go-playground/universal-translator/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/go-playground/universal-translator - -go 1.13 - -require github.com/go-playground/locales v0.13.0 diff --git a/vendor/github.com/go-playground/universal-translator/go.sum b/vendor/github.com/go-playground/universal-translator/go.sum deleted file mode 100644 index cbbf3241..00000000 --- a/vendor/github.com/go-playground/universal-translator/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go index 7bd76f26..1216f192 100644 --- a/vendor/github.com/go-playground/universal-translator/import_export.go +++ b/vendor/github.com/go-playground/universal-translator/import_export.go @@ -257,6 +257,8 @@ func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader i func stringToPR(s string) locales.PluralRule { switch s { + case "Zero": + return locales.PluralRuleZero case "One": return locales.PluralRuleOne case "Two": diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go index cfafce8a..24b18db9 100644 --- a/vendor/github.com/go-playground/universal-translator/translator.go +++ b/vendor/github.com/go-playground/universal-translator/translator.go @@ -159,13 +159,13 @@ func (t *translator) AddCardinal(key interface{}, text string, rule locales.Plur } } else { - tarr = make([]*transText, 7, 7) + tarr = make([]*transText, 7) t.cardinalTanslations[key] = tarr } trans := &transText{ text: text, - indexes: make([]int, 2, 2), + indexes: make([]int, 2), } tarr[rule] = trans @@ -211,13 +211,13 @@ func (t *translator) AddOrdinal(key interface{}, text string, rule locales.Plura } } else { - tarr = make([]*transText, 7, 7) + tarr = make([]*transText, 7) t.ordinalTanslations[key] = tarr } trans := &transText{ text: text, - indexes: make([]int, 2, 2), + indexes: make([]int, 2), } tarr[rule] = trans @@ -261,13 +261,13 @@ func (t *translator) AddRange(key interface{}, text string, rule locales.PluralR } } else { - tarr = make([]*transText, 7, 7) + tarr = make([]*transText, 7) t.rangeTanslations[key] = tarr } trans := &transText{ text: text, - indexes: make([]int, 4, 4), + indexes: make([]int, 4), } tarr[rule] = trans diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile index 164e8bb9..ec3455bd 100644 --- a/vendor/github.com/go-playground/validator/v10/Makefile +++ b/vendor/github.com/go-playground/validator/v10/Makefile @@ -3,7 +3,7 @@ GOCMD=GO111MODULE=on go linters-install: @golangci-lint --version >/dev/null 2>&1 || { \ echo "installing linting tools..."; \ - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.39.0; \ + curl -sfL 
https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \ } lint: linters-install diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index 15d9fbb9..f56cff15 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.6.1-green.svg) +![Project status](https://img.shields.io/badge/version-10.9.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) @@ -27,11 +27,11 @@ Installation Use go get. - go get github.com/go-playground/validator + go get github.com/go-playground/validator/v10 Then import the validator package into your own code. - import "github.com/go-playground/validator" + import "github.com/go-playground/validator/v10" Error Return Value ------- @@ -43,7 +43,7 @@ They return type error to avoid the issue discussed in the following, where err * http://stackoverflow.com/a/29138676/3158232 * https://github.com/go-playground/validator/issues/134 -Validator only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so: +Validator returns only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so: ```go err := validate.Struct(mystruct) @@ -100,7 +100,7 @@ Baked-in Validations | hostname_rfc1123 | Hostname RFC 1123 | | ip | Internet Protocol Address IP | | ip4_addr | Internet Protocol Address IPv4 | -| ip6_addr |Internet Protocol Address IPv6 | +| ip6_addr | Internet Protocol Address IPv6 | | ip_addr | Internet Protocol Address IP | | ipv4 | Internet Protocol Address IPv4 | | ipv6 | Internet Protocol Address IPv6 | @@ -126,15 +126,21 @@ Baked-in Validations | alphanumunicode | Alphanumeric Unicode | | alphaunicode | Alpha Unicode | | ascii | ASCII | +| boolean | Boolean | | contains | Contains | | containsany | Contains Any | | containsrune | Contains Rune | +| endsnotwith | Ends With | | endswith | Ends With | +| excludes | Excludes | +| excludesall | Excludes All | +| excludesrune | Excludes Rune | | lowercase | Lowercase | | multibyte | Multi-Byte Characters | | number | NOT DOCUMENTED IN doc.go | | numeric | Numeric | | printascii | Printable ASCII | +| startsnotwith | Starts Not With | | startswith | Starts With | | uppercase | Uppercase | @@ -143,6 +149,8 
@@ Baked-in Validations | - | - | | base64 | Base64 String | | base64url | Base64URL String | +| bic | Business Identifier Code (ISO 9362) | +| bcp47_language_tag | Language tag (BCP 47) | | btc_addr | Bitcoin Address | | btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | | datetime | Datetime | @@ -158,12 +166,21 @@ Baked-in Validations | isbn | International Standard Book Number | | isbn10 | International Standard Book Number 10 | | isbn13 | International Standard Book Number 13 | +| iso3166_1_alpha2 | Two-letter country code (ISO 3166-1 alpha-2) | +| iso3166_1_alpha3 | Three-letter country code (ISO 3166-1 alpha-3) | +| iso3166_1_alpha_numeric | Numeric country code (ISO 3166-1 numeric) | +| iso3166_2 | Country subdivision code (ISO 3166-2) | +| iso4217 | Currency code (ISO 4217) | | json | JSON | +| jwt | JSON Web Token (JWT) | | latitude | Latitude | | longitude | Longitude | +| postcode_iso3166_alpha2 | Postcode | +| postcode_iso3166_alpha2_field | Postcode | | rgb | RGB String | | rgba | RGBA String | | ssn | Social Security Number SSN | +| timezone | Timezone | | uuid | Universally Unique Identifier UUID | | uuid3 | Universally Unique Identifier UUID v3 | | uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 | @@ -178,7 +195,7 @@ Baked-in Validations | - | - | | eq | Equals | | gt | Greater than| -| gte |Greater than or equal | +| gte | Greater than or equal | | lt | Less Than | | lte | Less Than or Equal | | ne | Not Equal | @@ -187,10 +204,6 @@ Baked-in Validations | Tag | Description | | - | - | | dir | Directory | -| endswith | Ends With | -| excludes | Excludes | -| excludesall | Excludes All | -| excludesrune | Excludes Rune | | file | File path | | isdefault | Is Default | | len | Length | @@ -210,6 +223,12 @@ Baked-in Validations | excluded_without_all | Excluded Without All | | unique | Unique | +#### Aliases: +| Tag | Description | +| - | - | +| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla | +| country_code | iso3166_1_alpha2\|iso3166_1_alpha3\|iso3166_1_alpha_numeric | + Benchmarks ------ ###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64 diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index c46e6cd6..f5fd2391 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -56,7 +56,7 @@ var ( isdefault: {}, } - // BakedInAliasValidators is a default mapping of a single validation tag that + // bakedInAliases is a default mapping of a single validation tag that // defines a common or complex set of validation(s) to simplify // adding validation to structs. bakedInAliases = map[string]string{ @@ -64,7 +64,7 @@ var ( "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric", } - // BakedInValidators is the default map of ValidationFunc + // bakedInValidators is the default map of ValidationFunc // you can add, remove or even replace items to suite your needs, // or even disregard and use your own map if so desired. 
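As the renamed comment notes, `bakedInValidators` is only the default map; callers extend it at runtime. A hedged sketch of registering a custom tag with validator v10 (the `bk_code` tag and its rule are made up for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-playground/validator/v10"
)

type App struct {
	// "bk_code" is a hypothetical custom tag layered on top of the baked-in ones.
	Code string `validate:"required,bk_code"`
}

func main() {
	validate := validator.New()

	// Add our own rule alongside the defaults.
	_ = validate.RegisterValidation("bk_code", func(fl validator.FieldLevel) bool {
		return strings.HasPrefix(fl.Field().String(), "bk_")
	})

	fmt.Println(validate.Struct(App{Code: "bk_iam"})) // <nil>
	fmt.Println(validate.Struct(App{Code: "other"}))  // validation error on the bk_code tag
}
```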
bakedInValidators = map[string]Func{ @@ -107,6 +107,7 @@ var ( "alphanum": isAlphanum, "alphaunicode": isAlphaUnicode, "alphanumunicode": isAlphanumUnicode, + "boolean": isBoolean, "numeric": isNumeric, "number": isNumber, "hexadecimal": isHexadecimal, @@ -181,6 +182,7 @@ var ( "url_encoded": isURLEncoded, "dir": isDir, "json": isJSON, + "jwt": isJWT, "hostname_port": isHostnamePort, "lowercase": isLowercase, "uppercase": isUppercase, @@ -189,6 +191,9 @@ var ( "iso3166_1_alpha2": isIso3166Alpha2, "iso3166_1_alpha3": isIso3166Alpha3, "iso3166_1_alpha_numeric": isIso3166AlphaNumeric, + "iso3166_2": isIso31662, + "iso4217": isIso4217, + "iso4217_numeric": isIso4217Numeric, "bcp47_language_tag": isBCP47LanguageTag, "postcode_iso3166_alpha2": isPostcodeByIso3166Alpha2, "postcode_iso3166_alpha2_field": isPostcodeByIso3166Alpha2Field, @@ -301,7 +306,7 @@ func isUnique(fl FieldLevel) bool { } } -// IsMAC is the validation function for validating if the field's value is a valid MAC address. +// isMAC is the validation function for validating if the field's value is a valid MAC address. func isMAC(fl FieldLevel) bool { _, err := net.ParseMAC(fl.Field().String()) @@ -309,7 +314,7 @@ func isMAC(fl FieldLevel) bool { return err == nil } -// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. +// isCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. func isCIDRv4(fl FieldLevel) bool { ip, _, err := net.ParseCIDR(fl.Field().String()) @@ -317,7 +322,7 @@ func isCIDRv4(fl FieldLevel) bool { return err == nil && ip.To4() != nil } -// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. +// isCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. func isCIDRv6(fl FieldLevel) bool { ip, _, err := net.ParseCIDR(fl.Field().String()) @@ -325,7 +330,7 @@ func isCIDRv6(fl FieldLevel) bool { return err == nil && ip.To4() == nil } -// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. +// isCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. func isCIDR(fl FieldLevel) bool { _, _, err := net.ParseCIDR(fl.Field().String()) @@ -333,7 +338,7 @@ func isCIDR(fl FieldLevel) bool { return err == nil } -// IsIPv4 is the validation function for validating if a value is a valid v4 IP address. +// isIPv4 is the validation function for validating if a value is a valid v4 IP address. func isIPv4(fl FieldLevel) bool { ip := net.ParseIP(fl.Field().String()) @@ -341,7 +346,7 @@ func isIPv4(fl FieldLevel) bool { return ip != nil && ip.To4() != nil } -// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address. +// isIPv6 is the validation function for validating if the field's value is a valid v6 IP address. func isIPv6(fl FieldLevel) bool { ip := net.ParseIP(fl.Field().String()) @@ -349,7 +354,7 @@ func isIPv6(fl FieldLevel) bool { return ip != nil && ip.To4() == nil } -// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. +// isIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. func isIP(fl FieldLevel) bool { ip := net.ParseIP(fl.Field().String()) @@ -357,7 +362,7 @@ func isIP(fl FieldLevel) bool { return ip != nil } -// IsSSN is the validation function for validating if the field's value is a valid SSN. 
+// isSSN is the validation function for validating if the field's value is a valid SSN. func isSSN(fl FieldLevel) bool { field := fl.Field() @@ -369,7 +374,7 @@ func isSSN(fl FieldLevel) bool { return sSNRegex.MatchString(field.String()) } -// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate. +// isLongitude is the validation function for validating if the field's value is a valid longitude coordinate. func isLongitude(fl FieldLevel) bool { field := fl.Field() @@ -392,7 +397,7 @@ func isLongitude(fl FieldLevel) bool { return longitudeRegex.MatchString(v) } -// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate. +// isLatitude is the validation function for validating if the field's value is a valid latitude coordinate. func isLatitude(fl FieldLevel) bool { field := fl.Field() @@ -415,7 +420,7 @@ func isLatitude(fl FieldLevel) bool { return latitudeRegex.MatchString(v) } -// IsDataURI is the validation function for validating if the field's value is a valid data URI. +// isDataURI is the validation function for validating if the field's value is a valid data URI. func isDataURI(fl FieldLevel) bool { uri := strings.SplitN(fl.Field().String(), ",", 2) @@ -431,7 +436,7 @@ func isDataURI(fl FieldLevel) bool { return base64Regex.MatchString(uri[1]) } -// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. +// hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. func hasMultiByteCharacter(fl FieldLevel) bool { field := fl.Field() @@ -443,62 +448,62 @@ func hasMultiByteCharacter(fl FieldLevel) bool { return multibyteRegex.MatchString(field.String()) } -// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. +// isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. func isPrintableASCII(fl FieldLevel) bool { return printableASCIIRegex.MatchString(fl.Field().String()) } -// IsASCII is the validation function for validating if the field's value is a valid ASCII character. +// isASCII is the validation function for validating if the field's value is a valid ASCII character. func isASCII(fl FieldLevel) bool { return aSCIIRegex.MatchString(fl.Field().String()) } -// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID. +// isUUID5 is the validation function for validating if the field's value is a valid v5 UUID. func isUUID5(fl FieldLevel) bool { return uUID5Regex.MatchString(fl.Field().String()) } -// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID. +// isUUID4 is the validation function for validating if the field's value is a valid v4 UUID. func isUUID4(fl FieldLevel) bool { return uUID4Regex.MatchString(fl.Field().String()) } -// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID. +// isUUID3 is the validation function for validating if the field's value is a valid v3 UUID. func isUUID3(fl FieldLevel) bool { return uUID3Regex.MatchString(fl.Field().String()) } -// IsUUID is the validation function for validating if the field's value is a valid UUID of any version. +// isUUID is the validation function for validating if the field's value is a valid UUID of any version. 
func isUUID(fl FieldLevel) bool { return uUIDRegex.MatchString(fl.Field().String()) } -// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID. +// isUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID. func isUUID5RFC4122(fl FieldLevel) bool { return uUID5RFC4122Regex.MatchString(fl.Field().String()) } -// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID. +// isUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID. func isUUID4RFC4122(fl FieldLevel) bool { return uUID4RFC4122Regex.MatchString(fl.Field().String()) } -// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID. +// isUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID. func isUUID3RFC4122(fl FieldLevel) bool { return uUID3RFC4122Regex.MatchString(fl.Field().String()) } -// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version. +// isUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version. func isUUIDRFC4122(fl FieldLevel) bool { return uUIDRFC4122Regex.MatchString(fl.Field().String()) } -// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. +// isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. func isISBN(fl FieldLevel) bool { return isISBN10(fl) || isISBN13(fl) } -// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. +// isISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. func isISBN13(fl FieldLevel) bool { s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) @@ -519,7 +524,7 @@ func isISBN13(fl FieldLevel) bool { return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 } -// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. +// isISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. func isISBN10(fl FieldLevel) bool { s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3) @@ -544,7 +549,7 @@ func isISBN10(fl FieldLevel) bool { return checksum%11 == 0 } -// IsEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address. +// isEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address. 
func isEthereumAddress(fl FieldLevel) bool { address := fl.Field().String() @@ -575,7 +580,7 @@ func isEthereumAddress(fl FieldLevel) bool { return true } -// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address +// isBitcoinAddress is the validation function for validating if the field's value is a valid btc address func isBitcoinAddress(fl FieldLevel) bool { address := fl.Field().String() @@ -612,7 +617,7 @@ func isBitcoinAddress(fl FieldLevel) bool { return validchecksum == computedchecksum } -// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address +// isBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address func isBitcoinBech32Address(fl FieldLevel) bool { address := fl.Field().String() @@ -692,22 +697,22 @@ func isBitcoinBech32Address(fl FieldLevel) bool { return true } -// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param. +// excludesRune is the validation function for validating that the field's value does not contain the rune specified within the param. func excludesRune(fl FieldLevel) bool { return !containsRune(fl) } -// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param. +// excludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param. func excludesAll(fl FieldLevel) bool { return !containsAny(fl) } -// Excludes is the validation function for validating that the field's value does not contain the text specified within the param. +// excludes is the validation function for validating that the field's value does not contain the text specified within the param. func excludes(fl FieldLevel) bool { return !contains(fl) } -// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param. +// containsRune is the validation function for validating that the field's value contains the rune specified within the param. func containsRune(fl FieldLevel) bool { r, _ := utf8.DecodeRuneInString(fl.Param()) @@ -715,37 +720,37 @@ func containsRune(fl FieldLevel) bool { return strings.ContainsRune(fl.Field().String(), r) } -// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param. +// containsAny is the validation function for validating that the field's value contains any of the characters specified within the param. func containsAny(fl FieldLevel) bool { return strings.ContainsAny(fl.Field().String(), fl.Param()) } -// Contains is the validation function for validating that the field's value contains the text specified within the param. +// contains is the validation function for validating that the field's value contains the text specified within the param. func contains(fl FieldLevel) bool { return strings.Contains(fl.Field().String(), fl.Param()) } -// StartsWith is the validation function for validating that the field's value starts with the text specified within the param. +// startsWith is the validation function for validating that the field's value starts with the text specified within the param. 
func startsWith(fl FieldLevel) bool { return strings.HasPrefix(fl.Field().String(), fl.Param()) } -// EndsWith is the validation function for validating that the field's value ends with the text specified within the param. +// endsWith is the validation function for validating that the field's value ends with the text specified within the param. func endsWith(fl FieldLevel) bool { return strings.HasSuffix(fl.Field().String(), fl.Param()) } -// StartsNotWith is the validation function for validating that the field's value does not start with the text specified within the param. +// startsNotWith is the validation function for validating that the field's value does not start with the text specified within the param. func startsNotWith(fl FieldLevel) bool { return !startsWith(fl) } -// EndsNotWith is the validation function for validating that the field's value does not end with the text specified within the param. +// endsNotWith is the validation function for validating that the field's value does not end with the text specified within the param. func endsNotWith(fl FieldLevel) bool { return !endsWith(fl) } -// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value. +// fieldContains is the validation function for validating if the current field's value contains the field specified by the param's value. func fieldContains(fl FieldLevel) bool { field := fl.Field() @@ -758,7 +763,7 @@ func fieldContains(fl FieldLevel) bool { return strings.Contains(field.String(), currentField.String()) } -// FieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value. +// fieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value. func fieldExcludes(fl FieldLevel) bool { field := fl.Field() @@ -770,7 +775,7 @@ func fieldExcludes(fl FieldLevel) bool { return !strings.Contains(field.String(), currentField.String()) } -// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. +// isNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. func isNeField(fl FieldLevel) bool { field := fl.Field() @@ -822,12 +827,12 @@ func isNeField(fl FieldLevel) bool { return field.String() != currentField.String() } -// IsNe is the validation function for validating that the field's value does not equal the provided param value. +// isNe is the validation function for validating that the field's value does not equal the provided param value. func isNe(fl FieldLevel) bool { return !isEq(fl) } -// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. +// isLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. func isLteCrossStructField(fl FieldLevel) bool { field := fl.Field() @@ -874,7 +879,7 @@ func isLteCrossStructField(fl FieldLevel) bool { return field.String() <= topField.String() } -// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. 
+// isLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. // NOTE: This is exposed for use within your own custom functions and not intended to be called directly. func isLtCrossStructField(fl FieldLevel) bool { @@ -922,7 +927,7 @@ func isLtCrossStructField(fl FieldLevel) bool { return field.String() < topField.String() } -// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. +// isGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. func isGteCrossStructField(fl FieldLevel) bool { field := fl.Field() @@ -969,7 +974,7 @@ func isGteCrossStructField(fl FieldLevel) bool { return field.String() >= topField.String() } -// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. +// isGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. func isGtCrossStructField(fl FieldLevel) bool { field := fl.Field() @@ -1016,7 +1021,7 @@ func isGtCrossStructField(fl FieldLevel) bool { return field.String() > topField.String() } -// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. +// isNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. func isNeCrossStructField(fl FieldLevel) bool { field := fl.Field() @@ -1066,7 +1071,7 @@ func isNeCrossStructField(fl FieldLevel) bool { return topField.String() != field.String() } -// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. +// isEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. func isEqCrossStructField(fl FieldLevel) bool { field := fl.Field() @@ -1116,7 +1121,7 @@ func isEqCrossStructField(fl FieldLevel) bool { return topField.String() == field.String() } -// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. +// isEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. func isEqField(fl FieldLevel) bool { field := fl.Field() @@ -1167,7 +1172,7 @@ func isEqField(fl FieldLevel) bool { return field.String() == currentField.String() } -// IsEq is the validation function for validating if the current field's value is equal to the param's value. +// isEq is the validation function for validating if the current field's value is equal to the param's value. 
func isEq(fl FieldLevel) bool { field := fl.Field() @@ -1248,17 +1253,17 @@ func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool { return reg.MatchString(field.String()) } -// IsBase64 is the validation function for validating if the current field's value is a valid base 64. +// isBase64 is the validation function for validating if the current field's value is a valid base 64. func isBase64(fl FieldLevel) bool { return base64Regex.MatchString(fl.Field().String()) } -// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string. +// isBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string. func isBase64URL(fl FieldLevel) bool { return base64URLRegex.MatchString(fl.Field().String()) } -// IsURI is the validation function for validating if the current field's value is a valid URI. +// isURI is the validation function for validating if the current field's value is a valid URI. func isURI(fl FieldLevel) bool { field := fl.Field() @@ -1287,7 +1292,7 @@ func isURI(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// IsURL is the validation function for validating if the current field's value is a valid URL. +// isURL is the validation function for validating if the current field's value is a valid URL. func isURL(fl FieldLevel) bool { field := fl.Field() @@ -1339,7 +1344,7 @@ func isUrnRFC2141(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// IsFile is the validation function for validating if the current field's value is a valid file path. +// isFile is the validation function for validating if the current field's value is a valid file path. func isFile(fl FieldLevel) bool { field := fl.Field() @@ -1356,47 +1361,47 @@ func isFile(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// IsE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. +// isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. func isE164(fl FieldLevel) bool { return e164Regex.MatchString(fl.Field().String()) } -// IsEmail is the validation function for validating if the current field's value is a valid email address. +// isEmail is the validation function for validating if the current field's value is a valid email address. func isEmail(fl FieldLevel) bool { return emailRegex.MatchString(fl.Field().String()) } -// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color. +// isHSLA is the validation function for validating if the current field's value is a valid HSLA color. func isHSLA(fl FieldLevel) bool { return hslaRegex.MatchString(fl.Field().String()) } -// IsHSL is the validation function for validating if the current field's value is a valid HSL color. +// isHSL is the validation function for validating if the current field's value is a valid HSL color. func isHSL(fl FieldLevel) bool { return hslRegex.MatchString(fl.Field().String()) } -// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color. +// isRGBA is the validation function for validating if the current field's value is a valid RGBA color. func isRGBA(fl FieldLevel) bool { return rgbaRegex.MatchString(fl.Field().String()) } -// IsRGB is the validation function for validating if the current field's value is a valid RGB color. 
+// isRGB is the validation function for validating if the current field's value is a valid RGB color. func isRGB(fl FieldLevel) bool { return rgbRegex.MatchString(fl.Field().String()) } -// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color. +// isHEXColor is the validation function for validating if the current field's value is a valid HEX color. func isHEXColor(fl FieldLevel) bool { return hexColorRegex.MatchString(fl.Field().String()) } -// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. +// isHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. func isHexadecimal(fl FieldLevel) bool { return hexadecimalRegex.MatchString(fl.Field().String()) } -// IsNumber is the validation function for validating if the current field's value is a valid number. +// isNumber is the validation function for validating if the current field's value is a valid number. func isNumber(fl FieldLevel) bool { switch fl.Field().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: @@ -1406,7 +1411,7 @@ func isNumber(fl FieldLevel) bool { } } -// IsNumeric is the validation function for validating if the current field's value is a valid numeric value. +// isNumeric is the validation function for validating if the current field's value is a valid numeric value. func isNumeric(fl FieldLevel) bool { switch fl.Field().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: @@ -1416,32 +1421,38 @@ func isNumeric(fl FieldLevel) bool { } } -// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. +// isAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. func isAlphanum(fl FieldLevel) bool { return alphaNumericRegex.MatchString(fl.Field().String()) } -// IsAlpha is the validation function for validating if the current field's value is a valid alpha value. +// isAlpha is the validation function for validating if the current field's value is a valid alpha value. func isAlpha(fl FieldLevel) bool { return alphaRegex.MatchString(fl.Field().String()) } -// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. +// isAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. func isAlphanumUnicode(fl FieldLevel) bool { return alphaUnicodeNumericRegex.MatchString(fl.Field().String()) } -// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. +// isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. func isAlphaUnicode(fl FieldLevel) bool { return alphaUnicodeRegex.MatchString(fl.Field().String()) } +// isBoolean is the validation function for validating if the current field's value can be safely converted to a boolean. 
+func isBoolean(fl FieldLevel) bool { + _, err := strconv.ParseBool(fl.Field().String()) + return err == nil +} + // isDefault is the opposite of required aka hasValue func isDefault(fl FieldLevel) bool { return !hasValue(fl) } -// HasValue is the validation function for validating if the current field's value is not the default static value. +// hasValue is the validation function for validating if the current field's value is not the default static value. func hasValue(fl FieldLevel) bool { field := fl.Field() switch field.Kind() { @@ -1539,7 +1550,7 @@ func requiredUnless(fl FieldLevel) bool { return hasValue(fl) } -// ExcludedWith is the validation function +// excludedWith is the validation function // The field under validation must not be present or is empty if any of the other specified fields are present. func excludedWith(fl FieldLevel) bool { params := parseOneOfParam2(fl.Param()) @@ -1551,7 +1562,7 @@ func excludedWith(fl FieldLevel) bool { return true } -// RequiredWith is the validation function +// requiredWith is the validation function // The field under validation must be present and not empty only if any of the other specified fields are present. func requiredWith(fl FieldLevel) bool { params := parseOneOfParam2(fl.Param()) @@ -1563,7 +1574,7 @@ func requiredWith(fl FieldLevel) bool { return true } -// ExcludedWithAll is the validation function +// excludedWithAll is the validation function // The field under validation must not be present or is empty if all of the other specified fields are present. func excludedWithAll(fl FieldLevel) bool { params := parseOneOfParam2(fl.Param()) @@ -1575,7 +1586,7 @@ func excludedWithAll(fl FieldLevel) bool { return !hasValue(fl) } -// RequiredWithAll is the validation function +// requiredWithAll is the validation function // The field under validation must be present and not empty only if all of the other specified fields are present. func requiredWithAll(fl FieldLevel) bool { params := parseOneOfParam2(fl.Param()) @@ -1587,7 +1598,7 @@ func requiredWithAll(fl FieldLevel) bool { return hasValue(fl) } -// ExcludedWithout is the validation function +// excludedWithout is the validation function // The field under validation must not be present or is empty when any of the other specified fields are not present. func excludedWithout(fl FieldLevel) bool { if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { @@ -1596,7 +1607,7 @@ func excludedWithout(fl FieldLevel) bool { return true } -// RequiredWithout is the validation function +// requiredWithout is the validation function // The field under validation must be present and not empty only when any of the other specified fields are not present. func requiredWithout(fl FieldLevel) bool { if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { @@ -1605,7 +1616,7 @@ func requiredWithout(fl FieldLevel) bool { return true } -// ExcludedWithoutAll is the validation function +// excludedWithoutAll is the validation function // The field under validation must not be present or is empty when all of the other specified fields are not present. func excludedWithoutAll(fl FieldLevel) bool { params := parseOneOfParam2(fl.Param()) @@ -1617,7 +1628,7 @@ func excludedWithoutAll(fl FieldLevel) bool { return !hasValue(fl) } -// RequiredWithoutAll is the validation function +// requiredWithoutAll is the validation function // The field under validation must be present and not empty only when all of the other specified fields are not present. 
func requiredWithoutAll(fl FieldLevel) bool { params := parseOneOfParam2(fl.Param()) @@ -1629,7 +1640,7 @@ func requiredWithoutAll(fl FieldLevel) bool { return hasValue(fl) } -// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. +// isGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. func isGteField(fl FieldLevel) bool { field := fl.Field() @@ -1676,7 +1687,7 @@ func isGteField(fl FieldLevel) bool { return len(field.String()) >= len(currentField.String()) } -// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. +// isGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. func isGtField(fl FieldLevel) bool { field := fl.Field() @@ -1723,7 +1734,7 @@ func isGtField(fl FieldLevel) bool { return len(field.String()) > len(currentField.String()) } -// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value. +// isGte is the validation function for validating if the current field's value is greater than or equal to the param's value. func isGte(fl FieldLevel) bool { field := fl.Field() @@ -1770,7 +1781,7 @@ func isGte(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// IsGt is the validation function for validating if the current field's value is greater than the param's value. +// isGt is the validation function for validating if the current field's value is greater than the param's value. func isGt(fl FieldLevel) bool { field := fl.Field() @@ -1813,7 +1824,7 @@ func isGt(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value. +// hasLengthOf is the validation function for validating if the current field's value is equal to the param's value. func hasLengthOf(fl FieldLevel) bool { field := fl.Field() @@ -1850,12 +1861,12 @@ func hasLengthOf(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. +// hasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. func hasMinOf(fl FieldLevel) bool { return isGte(fl) } -// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. +// isLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. func isLteField(fl FieldLevel) bool { field := fl.Field() @@ -1902,7 +1913,7 @@ func isLteField(fl FieldLevel) bool { return len(field.String()) <= len(currentField.String()) } -// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. +// isLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. 
func isLtField(fl FieldLevel) bool { field := fl.Field() @@ -1949,7 +1960,7 @@ func isLtField(fl FieldLevel) bool { return len(field.String()) < len(currentField.String()) } -// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value. +// isLte is the validation function for validating if the current field's value is less than or equal to the param's value. func isLte(fl FieldLevel) bool { field := fl.Field() @@ -1996,7 +2007,7 @@ func isLte(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// IsLt is the validation function for validating if the current field's value is less than the param's value. +// isLt is the validation function for validating if the current field's value is less than the param's value. func isLt(fl FieldLevel) bool { field := fl.Field() @@ -2040,12 +2051,12 @@ func isLt(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } -// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. +// hasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. func hasMaxOf(fl FieldLevel) bool { return isLte(fl) } -// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. +// isTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. func isTCP4AddrResolvable(fl FieldLevel) bool { if !isIP4Addr(fl) { @@ -2056,7 +2067,7 @@ func isTCP4AddrResolvable(fl FieldLevel) bool { return err == nil } -// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. +// isTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. func isTCP6AddrResolvable(fl FieldLevel) bool { if !isIP6Addr(fl) { @@ -2068,7 +2079,7 @@ func isTCP6AddrResolvable(fl FieldLevel) bool { return err == nil } -// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. +// isTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. func isTCPAddrResolvable(fl FieldLevel) bool { if !isIP4Addr(fl) && !isIP6Addr(fl) { @@ -2080,7 +2091,7 @@ func isTCPAddrResolvable(fl FieldLevel) bool { return err == nil } -// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. +// isUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. func isUDP4AddrResolvable(fl FieldLevel) bool { if !isIP4Addr(fl) { @@ -2092,7 +2103,7 @@ func isUDP4AddrResolvable(fl FieldLevel) bool { return err == nil } -// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. +// isUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. func isUDP6AddrResolvable(fl FieldLevel) bool { if !isIP6Addr(fl) { @@ -2104,7 +2115,7 @@ func isUDP6AddrResolvable(fl FieldLevel) bool { return err == nil } -// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. +// isUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. 
func isUDPAddrResolvable(fl FieldLevel) bool { if !isIP4Addr(fl) && !isIP6Addr(fl) { @@ -2116,7 +2127,7 @@ func isUDPAddrResolvable(fl FieldLevel) bool { return err == nil } -// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. +// isIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. func isIP4AddrResolvable(fl FieldLevel) bool { if !isIPv4(fl) { @@ -2128,7 +2139,7 @@ func isIP4AddrResolvable(fl FieldLevel) bool { return err == nil } -// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. +// isIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. func isIP6AddrResolvable(fl FieldLevel) bool { if !isIPv6(fl) { @@ -2140,7 +2151,7 @@ func isIP6AddrResolvable(fl FieldLevel) bool { return err == nil } -// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. +// isIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. func isIPAddrResolvable(fl FieldLevel) bool { if !isIP(fl) { @@ -2152,7 +2163,7 @@ func isIPAddrResolvable(fl FieldLevel) bool { return err == nil } -// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. +// isUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. func isUnixAddrResolvable(fl FieldLevel) bool { _, err := net.ResolveUnixAddr("unix", fl.Field().String()) @@ -2206,7 +2217,7 @@ func isFQDN(fl FieldLevel) bool { return fqdnRegexRFC1123.MatchString(val) } -// IsDir is the validation function for validating if the current field's value is a valid directory. +// isDir is the validation function for validating if the current field's value is a valid directory. func isDir(fl FieldLevel) bool { field := fl.Field() @@ -2234,6 +2245,11 @@ func isJSON(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } +// isJWT is the validation function for validating if the current field's value is a valid JWT string. +func isJWT(fl FieldLevel) bool { + return jWTRegex.MatchString(fl.Field().String()) +} + // isHostnamePort validates a : combination for fields typically used for socket address. func isHostnamePort(fl FieldLevel) bool { val := fl.Field().String() @@ -2345,6 +2361,34 @@ func isIso3166AlphaNumeric(fl FieldLevel) bool { return iso3166_1_alpha_numeric[code] } +// isIso31662 is the validation function for validating if the current field's value is a valid iso3166-2 code. +func isIso31662(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_2[val] +} + +// isIso4217 is the validation function for validating if the current field's value is a valid iso4217 currency code. +func isIso4217(fl FieldLevel) bool { + val := fl.Field().String() + return iso4217[val] +} + +// isIso4217Numeric is the validation function for validating if the current field's value is a valid iso4217 numeric currency code. 
+func isIso4217Numeric(fl FieldLevel) bool { + field := fl.Field() + + var code int + switch field.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + code = int(field.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + code = int(field.Uint()) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + return iso4217_numeric[code] +} + // isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse func isBCP47LanguageTag(fl FieldLevel) bool { field := fl.Field() diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go index ef81eada..0d9eda03 100644 --- a/vendor/github.com/go-playground/validator/v10/country_codes.go +++ b/vendor/github.com/go-playground/validator/v10/country_codes.go @@ -160,3 +160,973 @@ var iso3166_1_alpha_numeric = map[int]bool{ 704: true, 92: true, 850: true, 876: true, 732: true, 887: true, 894: true, 716: true, 248: true, } + +var iso3166_2 = map[string]bool{ + "AD-02" : true, "AD-03" : true, "AD-04" : true, "AD-05" : true, "AD-06" : true, + "AD-07" : true, "AD-08" : true, "AE-AJ" : true, "AE-AZ" : true, "AE-DU" : true, + "AE-FU" : true, "AE-RK" : true, "AE-SH" : true, "AE-UQ" : true, "AF-BAL" : true, + "AF-BAM" : true, "AF-BDG" : true, "AF-BDS" : true, "AF-BGL" : true, "AF-DAY" : true, + "AF-FRA" : true, "AF-FYB" : true, "AF-GHA" : true, "AF-GHO" : true, "AF-HEL" : true, + "AF-HER" : true, "AF-JOW" : true, "AF-KAB" : true, "AF-KAN" : true, "AF-KAP" : true, + "AF-KDZ" : true, "AF-KHO" : true, "AF-KNR" : true, "AF-LAG" : true, "AF-LOG" : true, + "AF-NAN" : true, "AF-NIM" : true, "AF-NUR" : true, "AF-PAN" : true, "AF-PAR" : true, + "AF-PIA" : true, "AF-PKA" : true, "AF-SAM" : true, "AF-SAR" : true, "AF-TAK" : true, + "AF-URU" : true, "AF-WAR" : true, "AF-ZAB" : true, "AG-03" : true, "AG-04" : true, + "AG-05" : true, "AG-06" : true, "AG-07" : true, "AG-08" : true, "AG-10" : true, + "AG-11" : true, "AL-01" : true, "AL-02" : true, "AL-03" : true, "AL-04" : true, + "AL-05" : true, "AL-06" : true, "AL-07" : true, "AL-08" : true, "AL-09" : true, + "AL-10" : true, "AL-11" : true, "AL-12" : true, "AL-BR" : true, "AL-BU" : true, + "AL-DI" : true, "AL-DL" : true, "AL-DR" : true, "AL-DV" : true, "AL-EL" : true, + "AL-ER" : true, "AL-FR" : true, "AL-GJ" : true, "AL-GR" : true, "AL-HA" : true, + "AL-KA" : true, "AL-KB" : true, "AL-KC" : true, "AL-KO" : true, "AL-KR" : true, + "AL-KU" : true, "AL-LB" : true, "AL-LE" : true, "AL-LU" : true, "AL-MK" : true, + "AL-MM" : true, "AL-MR" : true, "AL-MT" : true, "AL-PG" : true, "AL-PQ" : true, + "AL-PR" : true, "AL-PU" : true, "AL-SH" : true, "AL-SK" : true, "AL-SR" : true, + "AL-TE" : true, "AL-TP" : true, "AL-TR" : true, "AL-VL" : true, "AM-AG" : true, + "AM-AR" : true, "AM-AV" : true, "AM-ER" : true, "AM-GR" : true, "AM-KT" : true, + "AM-LO" : true, "AM-SH" : true, "AM-SU" : true, "AM-TV" : true, "AM-VD" : true, + "AO-BGO" : true, "AO-BGU" : true, "AO-BIE" : true, "AO-CAB" : true, "AO-CCU" : true, + "AO-CNN" : true, "AO-CNO" : true, "AO-CUS" : true, "AO-HUA" : true, "AO-HUI" : true, + "AO-LNO" : true, "AO-LSU" : true, "AO-LUA" : true, "AO-MAL" : true, "AO-MOX" : true, + "AO-NAM" : true, "AO-UIG" : true, "AO-ZAI" : true, "AR-A" : true, "AR-B" : true, + "AR-C" : true, "AR-D" : true, "AR-E" : true, "AR-G" : true, "AR-H" : true, + "AR-J" : true, "AR-K" : true, "AR-L" : 
true, "AR-M" : true, "AR-N" : true, + "AR-P" : true, "AR-Q" : true, "AR-R" : true, "AR-S" : true, "AR-T" : true, + "AR-U" : true, "AR-V" : true, "AR-W" : true, "AR-X" : true, "AR-Y" : true, + "AR-Z" : true, "AT-1" : true, "AT-2" : true, "AT-3" : true, "AT-4" : true, + "AT-5" : true, "AT-6" : true, "AT-7" : true, "AT-8" : true, "AT-9" : true, + "AU-ACT" : true, "AU-NSW" : true, "AU-NT" : true, "AU-QLD" : true, "AU-SA" : true, + "AU-TAS" : true, "AU-VIC" : true, "AU-WA" : true, "AZ-ABS" : true, "AZ-AGA" : true, + "AZ-AGC" : true, "AZ-AGM" : true, "AZ-AGS" : true, "AZ-AGU" : true, "AZ-AST" : true, + "AZ-BA" : true, "AZ-BAB" : true, "AZ-BAL" : true, "AZ-BAR" : true, "AZ-BEY" : true, + "AZ-BIL" : true, "AZ-CAB" : true, "AZ-CAL" : true, "AZ-CUL" : true, "AZ-DAS" : true, + "AZ-FUZ" : true, "AZ-GA" : true, "AZ-GAD" : true, "AZ-GOR" : true, "AZ-GOY" : true, + "AZ-GYG" : true, "AZ-HAC" : true, "AZ-IMI" : true, "AZ-ISM" : true, "AZ-KAL" : true, + "AZ-KAN" : true, "AZ-KUR" : true, "AZ-LA" : true, "AZ-LAC" : true, "AZ-LAN" : true, + "AZ-LER" : true, "AZ-MAS" : true, "AZ-MI" : true, "AZ-NA" : true, "AZ-NEF" : true, + "AZ-NV" : true, "AZ-NX" : true, "AZ-OGU" : true, "AZ-ORD" : true, "AZ-QAB" : true, + "AZ-QAX" : true, "AZ-QAZ" : true, "AZ-QBA" : true, "AZ-QBI" : true, "AZ-QOB" : true, + "AZ-QUS" : true, "AZ-SA" : true, "AZ-SAB" : true, "AZ-SAD" : true, "AZ-SAH" : true, + "AZ-SAK" : true, "AZ-SAL" : true, "AZ-SAR" : true, "AZ-SAT" : true, "AZ-SBN" : true, + "AZ-SIY" : true, "AZ-SKR" : true, "AZ-SM" : true, "AZ-SMI" : true, "AZ-SMX" : true, + "AZ-SR" : true, "AZ-SUS" : true, "AZ-TAR" : true, "AZ-TOV" : true, "AZ-UCA" : true, + "AZ-XA" : true, "AZ-XAC" : true, "AZ-XCI" : true, "AZ-XIZ" : true, "AZ-XVD" : true, + "AZ-YAR" : true, "AZ-YE" : true, "AZ-YEV" : true, "AZ-ZAN" : true, "AZ-ZAQ" : true, + "AZ-ZAR" : true, "BA-01" : true, "BA-02" : true, "BA-03" : true, "BA-04" : true, + "BA-05" : true, "BA-06" : true, "BA-07" : true, "BA-08" : true, "BA-09" : true, + "BA-10" : true, "BA-BIH" : true, "BA-BRC" : true, "BA-SRP" : true, "BB-01" : true, + "BB-02" : true, "BB-03" : true, "BB-04" : true, "BB-05" : true, "BB-06" : true, + "BB-07" : true, "BB-08" : true, "BB-09" : true, "BB-10" : true, "BB-11" : true, + "BD-01" : true, "BD-02" : true, "BD-03" : true, "BD-04" : true, "BD-05" : true, + "BD-06" : true, "BD-07" : true, "BD-08" : true, "BD-09" : true, "BD-10" : true, + "BD-11" : true, "BD-12" : true, "BD-13" : true, "BD-14" : true, "BD-15" : true, + "BD-16" : true, "BD-17" : true, "BD-18" : true, "BD-19" : true, "BD-20" : true, + "BD-21" : true, "BD-22" : true, "BD-23" : true, "BD-24" : true, "BD-25" : true, + "BD-26" : true, "BD-27" : true, "BD-28" : true, "BD-29" : true, "BD-30" : true, + "BD-31" : true, "BD-32" : true, "BD-33" : true, "BD-34" : true, "BD-35" : true, + "BD-36" : true, "BD-37" : true, "BD-38" : true, "BD-39" : true, "BD-40" : true, + "BD-41" : true, "BD-42" : true, "BD-43" : true, "BD-44" : true, "BD-45" : true, + "BD-46" : true, "BD-47" : true, "BD-48" : true, "BD-49" : true, "BD-50" : true, + "BD-51" : true, "BD-52" : true, "BD-53" : true, "BD-54" : true, "BD-55" : true, + "BD-56" : true, "BD-57" : true, "BD-58" : true, "BD-59" : true, "BD-60" : true, + "BD-61" : true, "BD-62" : true, "BD-63" : true, "BD-64" : true, "BD-A" : true, + "BD-B" : true, "BD-C" : true, "BD-D" : true, "BD-E" : true, "BD-F" : true, + "BD-G" : true, "BE-BRU" : true, "BE-VAN" : true, "BE-VBR" : true, "BE-VLG" : true, + "BE-VLI" : true, "BE-VOV" : true, "BE-VWV" : true, "BE-WAL" : true, "BE-WBR" : true, + "BE-WHT" : true, 
"BE-WLG" : true, "BE-WLX" : true, "BE-WNA" : true, "BF-01" : true, + "BF-02" : true, "BF-03" : true, "BF-04" : true, "BF-05" : true, "BF-06" : true, + "BF-07" : true, "BF-08" : true, "BF-09" : true, "BF-10" : true, "BF-11" : true, + "BF-12" : true, "BF-13" : true, "BF-BAL" : true, "BF-BAM" : true, "BF-BAN" : true, + "BF-BAZ" : true, "BF-BGR" : true, "BF-BLG" : true, "BF-BLK" : true, "BF-COM" : true, + "BF-GAN" : true, "BF-GNA" : true, "BF-GOU" : true, "BF-HOU" : true, "BF-IOB" : true, + "BF-KAD" : true, "BF-KEN" : true, "BF-KMD" : true, "BF-KMP" : true, "BF-KOP" : true, + "BF-KOS" : true, "BF-KOT" : true, "BF-KOW" : true, "BF-LER" : true, "BF-LOR" : true, + "BF-MOU" : true, "BF-NAM" : true, "BF-NAO" : true, "BF-NAY" : true, "BF-NOU" : true, + "BF-OUB" : true, "BF-OUD" : true, "BF-PAS" : true, "BF-PON" : true, "BF-SEN" : true, + "BF-SIS" : true, "BF-SMT" : true, "BF-SNG" : true, "BF-SOM" : true, "BF-SOR" : true, + "BF-TAP" : true, "BF-TUI" : true, "BF-YAG" : true, "BF-YAT" : true, "BF-ZIR" : true, + "BF-ZON" : true, "BF-ZOU" : true, "BG-01" : true, "BG-02" : true, "BG-03" : true, + "BG-04" : true, "BG-05" : true, "BG-06" : true, "BG-07" : true, "BG-08" : true, + "BG-09" : true, "BG-10" : true, "BG-11" : true, "BG-12" : true, "BG-13" : true, + "BG-14" : true, "BG-15" : true, "BG-16" : true, "BG-17" : true, "BG-18" : true, + "BG-19" : true, "BG-20" : true, "BG-21" : true, "BG-22" : true, "BG-23" : true, + "BG-24" : true, "BG-25" : true, "BG-26" : true, "BG-27" : true, "BG-28" : true, + "BH-13" : true, "BH-14" : true, "BH-15" : true, "BH-16" : true, "BH-17" : true, + "BI-BB" : true, "BI-BL" : true, "BI-BM" : true, "BI-BR" : true, "BI-CA" : true, + "BI-CI" : true, "BI-GI" : true, "BI-KI" : true, "BI-KR" : true, "BI-KY" : true, + "BI-MA" : true, "BI-MU" : true, "BI-MW" : true, "BI-NG" : true, "BI-RT" : true, + "BI-RY" : true, "BJ-AK" : true, "BJ-AL" : true, "BJ-AQ" : true, "BJ-BO" : true, + "BJ-CO" : true, "BJ-DO" : true, "BJ-KO" : true, "BJ-LI" : true, "BJ-MO" : true, + "BJ-OU" : true, "BJ-PL" : true, "BJ-ZO" : true, "BN-BE" : true, "BN-BM" : true, + "BN-TE" : true, "BN-TU" : true, "BO-B" : true, "BO-C" : true, "BO-H" : true, + "BO-L" : true, "BO-N" : true, "BO-O" : true, "BO-P" : true, "BO-S" : true, + "BO-T" : true, "BQ-BO" : true, "BQ-SA" : true, "BQ-SE" : true, "BR-AC" : true, + "BR-AL" : true, "BR-AM" : true, "BR-AP" : true, "BR-BA" : true, "BR-CE" : true, + "BR-DF" : true, "BR-ES" : true, "BR-FN" : true, "BR-GO" : true, "BR-MA" : true, + "BR-MG" : true, "BR-MS" : true, "BR-MT" : true, "BR-PA" : true, "BR-PB" : true, + "BR-PE" : true, "BR-PI" : true, "BR-PR" : true, "BR-RJ" : true, "BR-RN" : true, + "BR-RO" : true, "BR-RR" : true, "BR-RS" : true, "BR-SC" : true, "BR-SE" : true, + "BR-SP" : true, "BR-TO" : true, "BS-AK" : true, "BS-BI" : true, "BS-BP" : true, + "BS-BY" : true, "BS-CE" : true, "BS-CI" : true, "BS-CK" : true, "BS-CO" : true, + "BS-CS" : true, "BS-EG" : true, "BS-EX" : true, "BS-FP" : true, "BS-GC" : true, + "BS-HI" : true, "BS-HT" : true, "BS-IN" : true, "BS-LI" : true, "BS-MC" : true, + "BS-MG" : true, "BS-MI" : true, "BS-NE" : true, "BS-NO" : true, "BS-NS" : true, + "BS-RC" : true, "BS-RI" : true, "BS-SA" : true, "BS-SE" : true, "BS-SO" : true, + "BS-SS" : true, "BS-SW" : true, "BS-WG" : true, "BT-11" : true, "BT-12" : true, + "BT-13" : true, "BT-14" : true, "BT-15" : true, "BT-21" : true, "BT-22" : true, + "BT-23" : true, "BT-24" : true, "BT-31" : true, "BT-32" : true, "BT-33" : true, + "BT-34" : true, "BT-41" : true, "BT-42" : true, "BT-43" : true, "BT-44" : true, + 
"BT-45" : true, "BT-GA" : true, "BT-TY" : true, "BW-CE" : true, "BW-GH" : true, + "BW-KG" : true, "BW-KL" : true, "BW-KW" : true, "BW-NE" : true, "BW-NW" : true, + "BW-SE" : true, "BW-SO" : true, "BY-BR" : true, "BY-HM" : true, "BY-HO" : true, + "BY-HR" : true, "BY-MA" : true, "BY-MI" : true, "BY-VI" : true, "BZ-BZ" : true, + "BZ-CY" : true, "BZ-CZL" : true, "BZ-OW" : true, "BZ-SC" : true, "BZ-TOL" : true, + "CA-AB" : true, "CA-BC" : true, "CA-MB" : true, "CA-NB" : true, "CA-NL" : true, + "CA-NS" : true, "CA-NT" : true, "CA-NU" : true, "CA-ON" : true, "CA-PE" : true, + "CA-QC" : true, "CA-SK" : true, "CA-YT" : true, "CD-BC" : true, "CD-BN" : true, + "CD-EQ" : true, "CD-KA" : true, "CD-KE" : true, "CD-KN" : true, "CD-KW" : true, + "CD-MA" : true, "CD-NK" : true, "CD-OR" : true, "CD-SK" : true, "CF-AC" : true, + "CF-BB" : true, "CF-BGF" : true, "CF-BK" : true, "CF-HK" : true, "CF-HM" : true, + "CF-HS" : true, "CF-KB" : true, "CF-KG" : true, "CF-LB" : true, "CF-MB" : true, + "CF-MP" : true, "CF-NM" : true, "CF-OP" : true, "CF-SE" : true, "CF-UK" : true, + "CF-VK" : true, "CG-11" : true, "CG-12" : true, "CG-13" : true, "CG-14" : true, + "CG-15" : true, "CG-2" : true, "CG-5" : true, "CG-7" : true, "CG-8" : true, + "CG-9" : true, "CG-BZV" : true, "CH-AG" : true, "CH-AI" : true, "CH-AR" : true, + "CH-BE" : true, "CH-BL" : true, "CH-BS" : true, "CH-FR" : true, "CH-GE" : true, + "CH-GL" : true, "CH-GR" : true, "CH-JU" : true, "CH-LU" : true, "CH-NE" : true, + "CH-NW" : true, "CH-OW" : true, "CH-SG" : true, "CH-SH" : true, "CH-SO" : true, + "CH-SZ" : true, "CH-TG" : true, "CH-TI" : true, "CH-UR" : true, "CH-VD" : true, + "CH-VS" : true, "CH-ZG" : true, "CH-ZH" : true, "CI-01" : true, "CI-02" : true, + "CI-03" : true, "CI-04" : true, "CI-05" : true, "CI-06" : true, "CI-07" : true, + "CI-08" : true, "CI-09" : true, "CI-10" : true, "CI-11" : true, "CI-12" : true, + "CI-13" : true, "CI-14" : true, "CI-15" : true, "CI-16" : true, "CI-17" : true, + "CI-18" : true, "CI-19" : true, "CL-AI" : true, "CL-AN" : true, "CL-AP" : true, + "CL-AR" : true, "CL-AT" : true, "CL-BI" : true, "CL-CO" : true, "CL-LI" : true, + "CL-LL" : true, "CL-LR" : true, "CL-MA" : true, "CL-ML" : true, "CL-RM" : true, + "CL-TA" : true, "CL-VS" : true, "CM-AD" : true, "CM-CE" : true, "CM-EN" : true, + "CM-ES" : true, "CM-LT" : true, "CM-NO" : true, "CM-NW" : true, "CM-OU" : true, + "CM-SU" : true, "CM-SW" : true, "CN-11" : true, "CN-12" : true, "CN-13" : true, + "CN-14" : true, "CN-15" : true, "CN-21" : true, "CN-22" : true, "CN-23" : true, + "CN-31" : true, "CN-32" : true, "CN-33" : true, "CN-34" : true, "CN-35" : true, + "CN-36" : true, "CN-37" : true, "CN-41" : true, "CN-42" : true, "CN-43" : true, + "CN-44" : true, "CN-45" : true, "CN-46" : true, "CN-50" : true, "CN-51" : true, + "CN-52" : true, "CN-53" : true, "CN-54" : true, "CN-61" : true, "CN-62" : true, + "CN-63" : true, "CN-64" : true, "CN-65" : true, "CN-71" : true, "CN-91" : true, + "CN-92" : true, "CO-AMA" : true, "CO-ANT" : true, "CO-ARA" : true, "CO-ATL" : true, + "CO-BOL" : true, "CO-BOY" : true, "CO-CAL" : true, "CO-CAQ" : true, "CO-CAS" : true, + "CO-CAU" : true, "CO-CES" : true, "CO-CHO" : true, "CO-COR" : true, "CO-CUN" : true, + "CO-DC" : true, "CO-GUA" : true, "CO-GUV" : true, "CO-HUI" : true, "CO-LAG" : true, + "CO-MAG" : true, "CO-MET" : true, "CO-NAR" : true, "CO-NSA" : true, "CO-PUT" : true, + "CO-QUI" : true, "CO-RIS" : true, "CO-SAN" : true, "CO-SAP" : true, "CO-SUC" : true, + "CO-TOL" : true, "CO-VAC" : true, "CO-VAU" : true, "CO-VID" : true, "CR-A" : true, 
+ "CR-C" : true, "CR-G" : true, "CR-H" : true, "CR-L" : true, "CR-P" : true, + "CR-SJ" : true, "CU-01" : true, "CU-02" : true, "CU-03" : true, "CU-04" : true, + "CU-05" : true, "CU-06" : true, "CU-07" : true, "CU-08" : true, "CU-09" : true, + "CU-10" : true, "CU-11" : true, "CU-12" : true, "CU-13" : true, "CU-14" : true, + "CU-99" : true, "CV-B" : true, "CV-BR" : true, "CV-BV" : true, "CV-CA" : true, + "CV-CF" : true, "CV-CR" : true, "CV-MA" : true, "CV-MO" : true, "CV-PA" : true, + "CV-PN" : true, "CV-PR" : true, "CV-RB" : true, "CV-RG" : true, "CV-RS" : true, + "CV-S" : true, "CV-SD" : true, "CV-SF" : true, "CV-SL" : true, "CV-SM" : true, + "CV-SO" : true, "CV-SS" : true, "CV-SV" : true, "CV-TA" : true, "CV-TS" : true, + "CY-01" : true, "CY-02" : true, "CY-03" : true, "CY-04" : true, "CY-05" : true, + "CY-06" : true, "CZ-10" : true, "CZ-101" : true, "CZ-102" : true, "CZ-103" : true, + "CZ-104" : true, "CZ-105" : true, "CZ-106" : true, "CZ-107" : true, "CZ-108" : true, + "CZ-109" : true, "CZ-110" : true, "CZ-111" : true, "CZ-112" : true, "CZ-113" : true, + "CZ-114" : true, "CZ-115" : true, "CZ-116" : true, "CZ-117" : true, "CZ-118" : true, + "CZ-119" : true, "CZ-120" : true, "CZ-121" : true, "CZ-122" : true, "CZ-20" : true, + "CZ-201" : true, "CZ-202" : true, "CZ-203" : true, "CZ-204" : true, "CZ-205" : true, + "CZ-206" : true, "CZ-207" : true, "CZ-208" : true, "CZ-209" : true, "CZ-20A" : true, + "CZ-20B" : true, "CZ-20C" : true, "CZ-31" : true, "CZ-311" : true, "CZ-312" : true, + "CZ-313" : true, "CZ-314" : true, "CZ-315" : true, "CZ-316" : true, "CZ-317" : true, + "CZ-32" : true, "CZ-321" : true, "CZ-322" : true, "CZ-323" : true, "CZ-324" : true, + "CZ-325" : true, "CZ-326" : true, "CZ-327" : true, "CZ-41" : true, "CZ-411" : true, + "CZ-412" : true, "CZ-413" : true, "CZ-42" : true, "CZ-421" : true, "CZ-422" : true, + "CZ-423" : true, "CZ-424" : true, "CZ-425" : true, "CZ-426" : true, "CZ-427" : true, + "CZ-51" : true, "CZ-511" : true, "CZ-512" : true, "CZ-513" : true, "CZ-514" : true, + "CZ-52" : true, "CZ-521" : true, "CZ-522" : true, "CZ-523" : true, "CZ-524" : true, + "CZ-525" : true, "CZ-53" : true, "CZ-531" : true, "CZ-532" : true, "CZ-533" : true, + "CZ-534" : true, "CZ-63" : true, "CZ-631" : true, "CZ-632" : true, "CZ-633" : true, + "CZ-634" : true, "CZ-635" : true, "CZ-64" : true, "CZ-641" : true, "CZ-642" : true, + "CZ-643" : true, "CZ-644" : true, "CZ-645" : true, "CZ-646" : true, "CZ-647" : true, + "CZ-71" : true, "CZ-711" : true, "CZ-712" : true, "CZ-713" : true, "CZ-714" : true, + "CZ-715" : true, "CZ-72" : true, "CZ-721" : true, "CZ-722" : true, "CZ-723" : true, + "CZ-724" : true, "CZ-80" : true, "CZ-801" : true, "CZ-802" : true, "CZ-803" : true, + "CZ-804" : true, "CZ-805" : true, "CZ-806" : true, "DE-BB" : true, "DE-BE" : true, + "DE-BW" : true, "DE-BY" : true, "DE-HB" : true, "DE-HE" : true, "DE-HH" : true, + "DE-MV" : true, "DE-NI" : true, "DE-NW" : true, "DE-RP" : true, "DE-SH" : true, + "DE-SL" : true, "DE-SN" : true, "DE-ST" : true, "DE-TH" : true, "DJ-AR" : true, + "DJ-AS" : true, "DJ-DI" : true, "DJ-DJ" : true, "DJ-OB" : true, "DJ-TA" : true, + "DK-81" : true, "DK-82" : true, "DK-83" : true, "DK-84" : true, "DK-85" : true, + "DM-01" : true, "DM-02" : true, "DM-03" : true, "DM-04" : true, "DM-05" : true, + "DM-06" : true, "DM-07" : true, "DM-08" : true, "DM-09" : true, "DM-10" : true, + "DO-01" : true, "DO-02" : true, "DO-03" : true, "DO-04" : true, "DO-05" : true, + "DO-06" : true, "DO-07" : true, "DO-08" : true, "DO-09" : true, "DO-10" : true, + "DO-11" : true, 
"DO-12" : true, "DO-13" : true, "DO-14" : true, "DO-15" : true, + "DO-16" : true, "DO-17" : true, "DO-18" : true, "DO-19" : true, "DO-20" : true, + "DO-21" : true, "DO-22" : true, "DO-23" : true, "DO-24" : true, "DO-25" : true, + "DO-26" : true, "DO-27" : true, "DO-28" : true, "DO-29" : true, "DO-30" : true, + "DZ-01" : true, "DZ-02" : true, "DZ-03" : true, "DZ-04" : true, "DZ-05" : true, + "DZ-06" : true, "DZ-07" : true, "DZ-08" : true, "DZ-09" : true, "DZ-10" : true, + "DZ-11" : true, "DZ-12" : true, "DZ-13" : true, "DZ-14" : true, "DZ-15" : true, + "DZ-16" : true, "DZ-17" : true, "DZ-18" : true, "DZ-19" : true, "DZ-20" : true, + "DZ-21" : true, "DZ-22" : true, "DZ-23" : true, "DZ-24" : true, "DZ-25" : true, + "DZ-26" : true, "DZ-27" : true, "DZ-28" : true, "DZ-29" : true, "DZ-30" : true, + "DZ-31" : true, "DZ-32" : true, "DZ-33" : true, "DZ-34" : true, "DZ-35" : true, + "DZ-36" : true, "DZ-37" : true, "DZ-38" : true, "DZ-39" : true, "DZ-40" : true, + "DZ-41" : true, "DZ-42" : true, "DZ-43" : true, "DZ-44" : true, "DZ-45" : true, + "DZ-46" : true, "DZ-47" : true, "DZ-48" : true, "EC-A" : true, "EC-B" : true, + "EC-C" : true, "EC-D" : true, "EC-E" : true, "EC-F" : true, "EC-G" : true, + "EC-H" : true, "EC-I" : true, "EC-L" : true, "EC-M" : true, "EC-N" : true, + "EC-O" : true, "EC-P" : true, "EC-R" : true, "EC-S" : true, "EC-SD" : true, + "EC-SE" : true, "EC-T" : true, "EC-U" : true, "EC-W" : true, "EC-X" : true, + "EC-Y" : true, "EC-Z" : true, "EE-37" : true, "EE-39" : true, "EE-44" : true, + "EE-49" : true, "EE-51" : true, "EE-57" : true, "EE-59" : true, "EE-65" : true, + "EE-67" : true, "EE-70" : true, "EE-74" : true, "EE-78" : true, "EE-82" : true, + "EE-84" : true, "EE-86" : true, "EG-ALX" : true, "EG-ASN" : true, "EG-AST" : true, + "EG-BA" : true, "EG-BH" : true, "EG-BNS" : true, "EG-C" : true, "EG-DK" : true, + "EG-DT" : true, "EG-FYM" : true, "EG-GH" : true, "EG-GZ" : true, "EG-HU" : true, + "EG-IS" : true, "EG-JS" : true, "EG-KB" : true, "EG-KFS" : true, "EG-KN" : true, + "EG-MN" : true, "EG-MNF" : true, "EG-MT" : true, "EG-PTS" : true, "EG-SHG" : true, + "EG-SHR" : true, "EG-SIN" : true, "EG-SU" : true, "EG-SUZ" : true, "EG-WAD" : true, + "ER-AN" : true, "ER-DK" : true, "ER-DU" : true, "ER-GB" : true, "ER-MA" : true, + "ER-SK" : true, "ES-A" : true, "ES-AB" : true, "ES-AL" : true, "ES-AN" : true, + "ES-AR" : true, "ES-AS" : true, "ES-AV" : true, "ES-B" : true, "ES-BA" : true, + "ES-BI" : true, "ES-BU" : true, "ES-C" : true, "ES-CA" : true, "ES-CB" : true, + "ES-CC" : true, "ES-CE" : true, "ES-CL" : true, "ES-CM" : true, "ES-CN" : true, + "ES-CO" : true, "ES-CR" : true, "ES-CS" : true, "ES-CT" : true, "ES-CU" : true, + "ES-EX" : true, "ES-GA" : true, "ES-GC" : true, "ES-GI" : true, "ES-GR" : true, + "ES-GU" : true, "ES-H" : true, "ES-HU" : true, "ES-IB" : true, "ES-J" : true, + "ES-L" : true, "ES-LE" : true, "ES-LO" : true, "ES-LU" : true, "ES-M" : true, + "ES-MA" : true, "ES-MC" : true, "ES-MD" : true, "ES-ML" : true, "ES-MU" : true, + "ES-NA" : true, "ES-NC" : true, "ES-O" : true, "ES-OR" : true, "ES-P" : true, + "ES-PM" : true, "ES-PO" : true, "ES-PV" : true, "ES-RI" : true, "ES-S" : true, + "ES-SA" : true, "ES-SE" : true, "ES-SG" : true, "ES-SO" : true, "ES-SS" : true, + "ES-T" : true, "ES-TE" : true, "ES-TF" : true, "ES-TO" : true, "ES-V" : true, + "ES-VA" : true, "ES-VC" : true, "ES-VI" : true, "ES-Z" : true, "ES-ZA" : true, + "ET-AA" : true, "ET-AF" : true, "ET-AM" : true, "ET-BE" : true, "ET-DD" : true, + "ET-GA" : true, "ET-HA" : true, "ET-OR" : true, "ET-SN" : true, 
"ET-SO" : true, + "ET-TI" : true, "FI-01" : true, "FI-02" : true, "FI-03" : true, "FI-04" : true, + "FI-05" : true, "FI-06" : true, "FI-07" : true, "FI-08" : true, "FI-09" : true, + "FI-10" : true, "FI-11" : true, "FI-12" : true, "FI-13" : true, "FI-14" : true, + "FI-15" : true, "FI-16" : true, "FI-17" : true, "FI-18" : true, "FI-19" : true, + "FJ-C" : true, "FJ-E" : true, "FJ-N" : true, "FJ-R" : true, "FJ-W" : true, + "FM-KSA" : true, "FM-PNI" : true, "FM-TRK" : true, "FM-YAP" : true, "FR-01" : true, + "FR-02" : true, "FR-03" : true, "FR-04" : true, "FR-05" : true, "FR-06" : true, + "FR-07" : true, "FR-08" : true, "FR-09" : true, "FR-10" : true, "FR-11" : true, + "FR-12" : true, "FR-13" : true, "FR-14" : true, "FR-15" : true, "FR-16" : true, + "FR-17" : true, "FR-18" : true, "FR-19" : true, "FR-21" : true, "FR-22" : true, + "FR-23" : true, "FR-24" : true, "FR-25" : true, "FR-26" : true, "FR-27" : true, + "FR-28" : true, "FR-29" : true, "FR-2A" : true, "FR-2B" : true, "FR-30" : true, + "FR-31" : true, "FR-32" : true, "FR-33" : true, "FR-34" : true, "FR-35" : true, + "FR-36" : true, "FR-37" : true, "FR-38" : true, "FR-39" : true, "FR-40" : true, + "FR-41" : true, "FR-42" : true, "FR-43" : true, "FR-44" : true, "FR-45" : true, + "FR-46" : true, "FR-47" : true, "FR-48" : true, "FR-49" : true, "FR-50" : true, + "FR-51" : true, "FR-52" : true, "FR-53" : true, "FR-54" : true, "FR-55" : true, + "FR-56" : true, "FR-57" : true, "FR-58" : true, "FR-59" : true, "FR-60" : true, + "FR-61" : true, "FR-62" : true, "FR-63" : true, "FR-64" : true, "FR-65" : true, + "FR-66" : true, "FR-67" : true, "FR-68" : true, "FR-69" : true, "FR-70" : true, + "FR-71" : true, "FR-72" : true, "FR-73" : true, "FR-74" : true, "FR-75" : true, + "FR-76" : true, "FR-77" : true, "FR-78" : true, "FR-79" : true, "FR-80" : true, + "FR-81" : true, "FR-82" : true, "FR-83" : true, "FR-84" : true, "FR-85" : true, + "FR-86" : true, "FR-87" : true, "FR-88" : true, "FR-89" : true, "FR-90" : true, + "FR-91" : true, "FR-92" : true, "FR-93" : true, "FR-94" : true, "FR-95" : true, + "FR-ARA" : true, "FR-BFC" : true, "FR-BL" : true, "FR-BRE" : true, "FR-COR" : true, + "FR-CP" : true, "FR-CVL" : true, "FR-GES" : true, "FR-GF" : true, "FR-GP" : true, + "FR-GUA" : true, "FR-HDF" : true, "FR-IDF" : true, "FR-LRE" : true, "FR-MAY" : true, + "FR-MF" : true, "FR-MQ" : true, "FR-NAQ" : true, "FR-NC" : true, "FR-NOR" : true, + "FR-OCC" : true, "FR-PAC" : true, "FR-PDL" : true, "FR-PF" : true, "FR-PM" : true, + "FR-RE" : true, "FR-TF" : true, "FR-WF" : true, "FR-YT" : true, "GA-1" : true, + "GA-2" : true, "GA-3" : true, "GA-4" : true, "GA-5" : true, "GA-6" : true, + "GA-7" : true, "GA-8" : true, "GA-9" : true, "GB-ABC" : true, "GB-ABD" : true, + "GB-ABE" : true, "GB-AGB" : true, "GB-AGY" : true, "GB-AND" : true, "GB-ANN" : true, + "GB-ANS" : true, "GB-BAS" : true, "GB-BBD" : true, "GB-BDF" : true, "GB-BDG" : true, + "GB-BEN" : true, "GB-BEX" : true, "GB-BFS" : true, "GB-BGE" : true, "GB-BGW" : true, + "GB-BIR" : true, "GB-BKM" : true, "GB-BMH" : true, "GB-BNE" : true, "GB-BNH" : true, + "GB-BNS" : true, "GB-BOL" : true, "GB-BPL" : true, "GB-BRC" : true, "GB-BRD" : true, + "GB-BRY" : true, "GB-BST" : true, "GB-BUR" : true, "GB-CAM" : true, "GB-CAY" : true, + "GB-CBF" : true, "GB-CCG" : true, "GB-CGN" : true, "GB-CHE" : true, "GB-CHW" : true, + "GB-CLD" : true, "GB-CLK" : true, "GB-CMA" : true, "GB-CMD" : true, "GB-CMN" : true, + "GB-CON" : true, "GB-COV" : true, "GB-CRF" : true, "GB-CRY" : true, "GB-CWY" : true, + "GB-DAL" : true, "GB-DBY" : true, 
"GB-DEN" : true, "GB-DER" : true, "GB-DEV" : true, + "GB-DGY" : true, "GB-DNC" : true, "GB-DND" : true, "GB-DOR" : true, "GB-DRS" : true, + "GB-DUD" : true, "GB-DUR" : true, "GB-EAL" : true, "GB-EAW" : true, "GB-EAY" : true, + "GB-EDH" : true, "GB-EDU" : true, "GB-ELN" : true, "GB-ELS" : true, "GB-ENF" : true, + "GB-ENG" : true, "GB-ERW" : true, "GB-ERY" : true, "GB-ESS" : true, "GB-ESX" : true, + "GB-FAL" : true, "GB-FIF" : true, "GB-FLN" : true, "GB-FMO" : true, "GB-GAT" : true, + "GB-GBN" : true, "GB-GLG" : true, "GB-GLS" : true, "GB-GRE" : true, "GB-GWN" : true, + "GB-HAL" : true, "GB-HAM" : true, "GB-HAV" : true, "GB-HCK" : true, "GB-HEF" : true, + "GB-HIL" : true, "GB-HLD" : true, "GB-HMF" : true, "GB-HNS" : true, "GB-HPL" : true, + "GB-HRT" : true, "GB-HRW" : true, "GB-HRY" : true, "GB-IOS" : true, "GB-IOW" : true, + "GB-ISL" : true, "GB-IVC" : true, "GB-KEC" : true, "GB-KEN" : true, "GB-KHL" : true, + "GB-KIR" : true, "GB-KTT" : true, "GB-KWL" : true, "GB-LAN" : true, "GB-LBC" : true, + "GB-LBH" : true, "GB-LCE" : true, "GB-LDS" : true, "GB-LEC" : true, "GB-LEW" : true, + "GB-LIN" : true, "GB-LIV" : true, "GB-LND" : true, "GB-LUT" : true, "GB-MAN" : true, + "GB-MDB" : true, "GB-MDW" : true, "GB-MEA" : true, "GB-MIK" : true, "GD-01" : true, + "GB-MLN" : true, "GB-MON" : true, "GB-MRT" : true, "GB-MRY" : true, "GB-MTY" : true, + "GB-MUL" : true, "GB-NAY" : true, "GB-NBL" : true, "GB-NEL" : true, "GB-NET" : true, + "GB-NFK" : true, "GB-NGM" : true, "GB-NIR" : true, "GB-NLK" : true, "GB-NLN" : true, + "GB-NMD" : true, "GB-NSM" : true, "GB-NTH" : true, "GB-NTL" : true, "GB-NTT" : true, + "GB-NTY" : true, "GB-NWM" : true, "GB-NWP" : true, "GB-NYK" : true, "GB-OLD" : true, + "GB-ORK" : true, "GB-OXF" : true, "GB-PEM" : true, "GB-PKN" : true, "GB-PLY" : true, + "GB-POL" : true, "GB-POR" : true, "GB-POW" : true, "GB-PTE" : true, "GB-RCC" : true, + "GB-RCH" : true, "GB-RCT" : true, "GB-RDB" : true, "GB-RDG" : true, "GB-RFW" : true, + "GB-RIC" : true, "GB-ROT" : true, "GB-RUT" : true, "GB-SAW" : true, "GB-SAY" : true, + "GB-SCB" : true, "GB-SCT" : true, "GB-SFK" : true, "GB-SFT" : true, "GB-SGC" : true, + "GB-SHF" : true, "GB-SHN" : true, "GB-SHR" : true, "GB-SKP" : true, "GB-SLF" : true, + "GB-SLG" : true, "GB-SLK" : true, "GB-SND" : true, "GB-SOL" : true, "GB-SOM" : true, + "GB-SOS" : true, "GB-SRY" : true, "GB-STE" : true, "GB-STG" : true, "GB-STH" : true, + "GB-STN" : true, "GB-STS" : true, "GB-STT" : true, "GB-STY" : true, "GB-SWA" : true, + "GB-SWD" : true, "GB-SWK" : true, "GB-TAM" : true, "GB-TFW" : true, "GB-THR" : true, + "GB-TOB" : true, "GB-TOF" : true, "GB-TRF" : true, "GB-TWH" : true, "GB-UKM" : true, + "GB-VGL" : true, "GB-WAR" : true, "GB-WBK" : true, "GB-WDU" : true, "GB-WFT" : true, + "GB-WGN" : true, "GB-WIL" : true, "GB-WKF" : true, "GB-WLL" : true, "GB-WLN" : true, + "GB-WLS" : true, "GB-WLV" : true, "GB-WND" : true, "GB-WNM" : true, "GB-WOK" : true, + "GB-WOR" : true, "GB-WRL" : true, "GB-WRT" : true, "GB-WRX" : true, "GB-WSM" : true, + "GB-WSX" : true, "GB-YOR" : true, "GB-ZET" : true, "GD-02" : true, "GD-03" : true, + "GD-04" : true, "GD-05" : true, "GD-06" : true, "GD-10" : true, "GE-AB" : true, + "GE-AJ" : true, "GE-GU" : true, "GE-IM" : true, "GE-KA" : true, "GE-KK" : true, + "GE-MM" : true, "GE-RL" : true, "GE-SJ" : true, "GE-SK" : true, "GE-SZ" : true, + "GE-TB" : true, "GH-AA" : true, "GH-AH" : true, "GH-BA" : true, "GH-CP" : true, + "GH-EP" : true, "GH-NP" : true, "GH-TV" : true, "GH-UE" : true, "GH-UW" : true, + "GH-WP" : true, "GL-KU" : true, "GL-QA" : true, 
"GL-QE" : true, "GL-SM" : true, + "GM-B" : true, "GM-L" : true, "GM-M" : true, "GM-N" : true, "GM-U" : true, + "GM-W" : true, "GN-B" : true, "GN-BE" : true, "GN-BF" : true, "GN-BK" : true, + "GN-C" : true, "GN-CO" : true, "GN-D" : true, "GN-DB" : true, "GN-DI" : true, + "GN-DL" : true, "GN-DU" : true, "GN-F" : true, "GN-FA" : true, "GN-FO" : true, + "GN-FR" : true, "GN-GA" : true, "GN-GU" : true, "GN-K" : true, "GN-KA" : true, + "GN-KB" : true, "GN-KD" : true, "GN-KE" : true, "GN-KN" : true, "GN-KO" : true, + "GN-KS" : true, "GN-L" : true, "GN-LA" : true, "GN-LE" : true, "GN-LO" : true, + "GN-M" : true, "GN-MC" : true, "GN-MD" : true, "GN-ML" : true, "GN-MM" : true, + "GN-N" : true, "GN-NZ" : true, "GN-PI" : true, "GN-SI" : true, "GN-TE" : true, + "GN-TO" : true, "GN-YO" : true, "GQ-AN" : true, "GQ-BN" : true, "GQ-BS" : true, + "GQ-C" : true, "GQ-CS" : true, "GQ-I" : true, "GQ-KN" : true, "GQ-LI" : true, + "GQ-WN" : true, "GR-01" : true, "GR-03" : true, "GR-04" : true, "GR-05" : true, + "GR-06" : true, "GR-07" : true, "GR-11" : true, "GR-12" : true, "GR-13" : true, + "GR-14" : true, "GR-15" : true, "GR-16" : true, "GR-17" : true, "GR-21" : true, + "GR-22" : true, "GR-23" : true, "GR-24" : true, "GR-31" : true, "GR-32" : true, + "GR-33" : true, "GR-34" : true, "GR-41" : true, "GR-42" : true, "GR-43" : true, + "GR-44" : true, "GR-51" : true, "GR-52" : true, "GR-53" : true, "GR-54" : true, + "GR-55" : true, "GR-56" : true, "GR-57" : true, "GR-58" : true, "GR-59" : true, + "GR-61" : true, "GR-62" : true, "GR-63" : true, "GR-64" : true, "GR-69" : true, + "GR-71" : true, "GR-72" : true, "GR-73" : true, "GR-81" : true, "GR-82" : true, + "GR-83" : true, "GR-84" : true, "GR-85" : true, "GR-91" : true, "GR-92" : true, + "GR-93" : true, "GR-94" : true, "GR-A" : true, "GR-A1" : true, "GR-B" : true, + "GR-C" : true, "GR-D" : true, "GR-E" : true, "GR-F" : true, "GR-G" : true, + "GR-H" : true, "GR-I" : true, "GR-J" : true, "GR-K" : true, "GR-L" : true, + "GR-M" : true, "GT-AV" : true, "GT-BV" : true, "GT-CM" : true, "GT-CQ" : true, + "GT-ES" : true, "GT-GU" : true, "GT-HU" : true, "GT-IZ" : true, "GT-JA" : true, + "GT-JU" : true, "GT-PE" : true, "GT-PR" : true, "GT-QC" : true, "GT-QZ" : true, + "GT-RE" : true, "GT-SA" : true, "GT-SM" : true, "GT-SO" : true, "GT-SR" : true, + "GT-SU" : true, "GT-TO" : true, "GT-ZA" : true, "GW-BA" : true, "GW-BL" : true, + "GW-BM" : true, "GW-BS" : true, "GW-CA" : true, "GW-GA" : true, "GW-L" : true, + "GW-N" : true, "GW-OI" : true, "GW-QU" : true, "GW-S" : true, "GW-TO" : true, + "GY-BA" : true, "GY-CU" : true, "GY-DE" : true, "GY-EB" : true, "GY-ES" : true, + "GY-MA" : true, "GY-PM" : true, "GY-PT" : true, "GY-UD" : true, "GY-UT" : true, + "HN-AT" : true, "HN-CH" : true, "HN-CL" : true, "HN-CM" : true, "HN-CP" : true, + "HN-CR" : true, "HN-EP" : true, "HN-FM" : true, "HN-GD" : true, "HN-IB" : true, + "HN-IN" : true, "HN-LE" : true, "HN-LP" : true, "HN-OC" : true, "HN-OL" : true, + "HN-SB" : true, "HN-VA" : true, "HN-YO" : true, "HR-01" : true, "HR-02" : true, + "HR-03" : true, "HR-04" : true, "HR-05" : true, "HR-06" : true, "HR-07" : true, + "HR-08" : true, "HR-09" : true, "HR-10" : true, "HR-11" : true, "HR-12" : true, + "HR-13" : true, "HR-14" : true, "HR-15" : true, "HR-16" : true, "HR-17" : true, + "HR-18" : true, "HR-19" : true, "HR-20" : true, "HR-21" : true, "HT-AR" : true, + "HT-CE" : true, "HT-GA" : true, "HT-ND" : true, "HT-NE" : true, "HT-NO" : true, + "HT-OU" : true, "HT-SD" : true, "HT-SE" : true, "HU-BA" : true, "HU-BC" : true, + "HU-BE" : true, "HU-BK" : 
true, "HU-BU" : true, "HU-BZ" : true, "HU-CS" : true, + "HU-DE" : true, "HU-DU" : true, "HU-EG" : true, "HU-ER" : true, "HU-FE" : true, + "HU-GS" : true, "HU-GY" : true, "HU-HB" : true, "HU-HE" : true, "HU-HV" : true, + "HU-JN" : true, "HU-KE" : true, "HU-KM" : true, "HU-KV" : true, "HU-MI" : true, + "HU-NK" : true, "HU-NO" : true, "HU-NY" : true, "HU-PE" : true, "HU-PS" : true, + "HU-SD" : true, "HU-SF" : true, "HU-SH" : true, "HU-SK" : true, "HU-SN" : true, + "HU-SO" : true, "HU-SS" : true, "HU-ST" : true, "HU-SZ" : true, "HU-TB" : true, + "HU-TO" : true, "HU-VA" : true, "HU-VE" : true, "HU-VM" : true, "HU-ZA" : true, + "HU-ZE" : true, "ID-AC" : true, "ID-BA" : true, "ID-BB" : true, "ID-BE" : true, + "ID-BT" : true, "ID-GO" : true, "ID-IJ" : true, "ID-JA" : true, "ID-JB" : true, + "ID-JI" : true, "ID-JK" : true, "ID-JT" : true, "ID-JW" : true, "ID-KA" : true, + "ID-KB" : true, "ID-KI" : true, "ID-KR" : true, "ID-KS" : true, "ID-KT" : true, + "ID-LA" : true, "ID-MA" : true, "ID-ML" : true, "ID-MU" : true, "ID-NB" : true, + "ID-NT" : true, "ID-NU" : true, "ID-PA" : true, "ID-PB" : true, "ID-RI" : true, + "ID-SA" : true, "ID-SB" : true, "ID-SG" : true, "ID-SL" : true, "ID-SM" : true, + "ID-SN" : true, "ID-SR" : true, "ID-SS" : true, "ID-ST" : true, "ID-SU" : true, + "ID-YO" : true, "IE-C" : true, "IE-CE" : true, "IE-CN" : true, "IE-CO" : true, + "IE-CW" : true, "IE-D" : true, "IE-DL" : true, "IE-G" : true, "IE-KE" : true, + "IE-KK" : true, "IE-KY" : true, "IE-L" : true, "IE-LD" : true, "IE-LH" : true, + "IE-LK" : true, "IE-LM" : true, "IE-LS" : true, "IE-M" : true, "IE-MH" : true, + "IE-MN" : true, "IE-MO" : true, "IE-OY" : true, "IE-RN" : true, "IE-SO" : true, + "IE-TA" : true, "IE-U" : true, "IE-WD" : true, "IE-WH" : true, "IE-WW" : true, + "IE-WX" : true, "IL-D" : true, "IL-HA" : true, "IL-JM" : true, "IL-M" : true, + "IL-TA" : true, "IL-Z" : true, "IN-AN" : true, "IN-AP" : true, "IN-AR" : true, + "IN-AS" : true, "IN-BR" : true, "IN-CH" : true, "IN-CT" : true, "IN-DD" : true, + "IN-DL" : true, "IN-DN" : true, "IN-GA" : true, "IN-GJ" : true, "IN-HP" : true, + "IN-HR" : true, "IN-JH" : true, "IN-JK" : true, "IN-KA" : true, "IN-KL" : true, + "IN-LD" : true, "IN-MH" : true, "IN-ML" : true, "IN-MN" : true, "IN-MP" : true, + "IN-MZ" : true, "IN-NL" : true, "IN-OR" : true, "IN-PB" : true, "IN-PY" : true, + "IN-RJ" : true, "IN-SK" : true, "IN-TN" : true, "IN-TR" : true, "IN-UP" : true, + "IN-UT" : true, "IN-WB" : true, "IQ-AN" : true, "IQ-AR" : true, "IQ-BA" : true, + "IQ-BB" : true, "IQ-BG" : true, "IQ-DA" : true, "IQ-DI" : true, "IQ-DQ" : true, + "IQ-KA" : true, "IQ-MA" : true, "IQ-MU" : true, "IQ-NA" : true, "IQ-NI" : true, + "IQ-QA" : true, "IQ-SD" : true, "IQ-SW" : true, "IQ-TS" : true, "IQ-WA" : true, + "IR-01" : true, "IR-02" : true, "IR-03" : true, "IR-04" : true, "IR-05" : true, + "IR-06" : true, "IR-07" : true, "IR-08" : true, "IR-10" : true, "IR-11" : true, + "IR-12" : true, "IR-13" : true, "IR-14" : true, "IR-15" : true, "IR-16" : true, + "IR-17" : true, "IR-18" : true, "IR-19" : true, "IR-20" : true, "IR-21" : true, + "IR-22" : true, "IR-23" : true, "IR-24" : true, "IR-25" : true, "IR-26" : true, + "IR-27" : true, "IR-28" : true, "IR-29" : true, "IR-30" : true, "IR-31" : true, + "IS-0" : true, "IS-1" : true, "IS-2" : true, "IS-3" : true, "IS-4" : true, + "IS-5" : true, "IS-6" : true, "IS-7" : true, "IS-8" : true, "IT-21" : true, + "IT-23" : true, "IT-25" : true, "IT-32" : true, "IT-34" : true, "IT-36" : true, + "IT-42" : true, "IT-45" : true, "IT-52" : true, "IT-55" : true, "IT-57" 
: true, + "IT-62" : true, "IT-65" : true, "IT-67" : true, "IT-72" : true, "IT-75" : true, + "IT-77" : true, "IT-78" : true, "IT-82" : true, "IT-88" : true, "IT-AG" : true, + "IT-AL" : true, "IT-AN" : true, "IT-AO" : true, "IT-AP" : true, "IT-AQ" : true, + "IT-AR" : true, "IT-AT" : true, "IT-AV" : true, "IT-BA" : true, "IT-BG" : true, + "IT-BI" : true, "IT-BL" : true, "IT-BN" : true, "IT-BO" : true, "IT-BR" : true, + "IT-BS" : true, "IT-BT" : true, "IT-BZ" : true, "IT-CA" : true, "IT-CB" : true, + "IT-CE" : true, "IT-CH" : true, "IT-CI" : true, "IT-CL" : true, "IT-CN" : true, + "IT-CO" : true, "IT-CR" : true, "IT-CS" : true, "IT-CT" : true, "IT-CZ" : true, + "IT-EN" : true, "IT-FC" : true, "IT-FE" : true, "IT-FG" : true, "IT-FI" : true, + "IT-FM" : true, "IT-FR" : true, "IT-GE" : true, "IT-GO" : true, "IT-GR" : true, + "IT-IM" : true, "IT-IS" : true, "IT-KR" : true, "IT-LC" : true, "IT-LE" : true, + "IT-LI" : true, "IT-LO" : true, "IT-LT" : true, "IT-LU" : true, "IT-MB" : true, + "IT-MC" : true, "IT-ME" : true, "IT-MI" : true, "IT-MN" : true, "IT-MO" : true, + "IT-MS" : true, "IT-MT" : true, "IT-NA" : true, "IT-NO" : true, "IT-NU" : true, + "IT-OG" : true, "IT-OR" : true, "IT-OT" : true, "IT-PA" : true, "IT-PC" : true, + "IT-PD" : true, "IT-PE" : true, "IT-PG" : true, "IT-PI" : true, "IT-PN" : true, + "IT-PO" : true, "IT-PR" : true, "IT-PT" : true, "IT-PU" : true, "IT-PV" : true, + "IT-PZ" : true, "IT-RA" : true, "IT-RC" : true, "IT-RE" : true, "IT-RG" : true, + "IT-RI" : true, "IT-RM" : true, "IT-RN" : true, "IT-RO" : true, "IT-SA" : true, + "IT-SI" : true, "IT-SO" : true, "IT-SP" : true, "IT-SR" : true, "IT-SS" : true, + "IT-SV" : true, "IT-TA" : true, "IT-TE" : true, "IT-TN" : true, "IT-TO" : true, + "IT-TP" : true, "IT-TR" : true, "IT-TS" : true, "IT-TV" : true, "IT-UD" : true, + "IT-VA" : true, "IT-VB" : true, "IT-VC" : true, "IT-VE" : true, "IT-VI" : true, + "IT-VR" : true, "IT-VS" : true, "IT-VT" : true, "IT-VV" : true, "JM-01" : true, + "JM-02" : true, "JM-03" : true, "JM-04" : true, "JM-05" : true, "JM-06" : true, + "JM-07" : true, "JM-08" : true, "JM-09" : true, "JM-10" : true, "JM-11" : true, + "JM-12" : true, "JM-13" : true, "JM-14" : true, "JO-AJ" : true, "JO-AM" : true, + "JO-AQ" : true, "JO-AT" : true, "JO-AZ" : true, "JO-BA" : true, "JO-IR" : true, + "JO-JA" : true, "JO-KA" : true, "JO-MA" : true, "JO-MD" : true, "JO-MN" : true, + "JP-01" : true, "JP-02" : true, "JP-03" : true, "JP-04" : true, "JP-05" : true, + "JP-06" : true, "JP-07" : true, "JP-08" : true, "JP-09" : true, "JP-10" : true, + "JP-11" : true, "JP-12" : true, "JP-13" : true, "JP-14" : true, "JP-15" : true, + "JP-16" : true, "JP-17" : true, "JP-18" : true, "JP-19" : true, "JP-20" : true, + "JP-21" : true, "JP-22" : true, "JP-23" : true, "JP-24" : true, "JP-25" : true, + "JP-26" : true, "JP-27" : true, "JP-28" : true, "JP-29" : true, "JP-30" : true, + "JP-31" : true, "JP-32" : true, "JP-33" : true, "JP-34" : true, "JP-35" : true, + "JP-36" : true, "JP-37" : true, "JP-38" : true, "JP-39" : true, "JP-40" : true, + "JP-41" : true, "JP-42" : true, "JP-43" : true, "JP-44" : true, "JP-45" : true, + "JP-46" : true, "JP-47" : true, "KE-110" : true, "KE-200" : true, "KE-300" : true, + "KE-400" : true, "KE-500" : true, "KE-700" : true, "KE-800" : true, "KG-B" : true, + "KG-C" : true, "KG-GB" : true, "KG-J" : true, "KG-N" : true, "KG-O" : true, + "KG-T" : true, "KG-Y" : true, "KH-1" : true, "KH-10" : true, "KH-11" : true, + "KH-12" : true, "KH-13" : true, "KH-14" : true, "KH-15" : true, "KH-16" : true, + "KH-17" : true, 
"KH-18" : true, "KH-19" : true, "KH-2" : true, "KH-20" : true, + "KH-21" : true, "KH-22" : true, "KH-23" : true, "KH-24" : true, "KH-3" : true, + "KH-4" : true, "KH-5" : true, "KH-6" : true, "KH-7" : true, "KH-8" : true, + "KH-9" : true, "KI-G" : true, "KI-L" : true, "KI-P" : true, "KM-A" : true, + "KM-G" : true, "KM-M" : true, "KN-01" : true, "KN-02" : true, "KN-03" : true, + "KN-04" : true, "KN-05" : true, "KN-06" : true, "KN-07" : true, "KN-08" : true, + "KN-09" : true, "KN-10" : true, "KN-11" : true, "KN-12" : true, "KN-13" : true, + "KN-15" : true, "KN-K" : true, "KN-N" : true, "KP-01" : true, "KP-02" : true, + "KP-03" : true, "KP-04" : true, "KP-05" : true, "KP-06" : true, "KP-07" : true, + "KP-08" : true, "KP-09" : true, "KP-10" : true, "KP-13" : true, "KR-11" : true, + "KR-26" : true, "KR-27" : true, "KR-28" : true, "KR-29" : true, "KR-30" : true, + "KR-31" : true, "KR-41" : true, "KR-42" : true, "KR-43" : true, "KR-44" : true, + "KR-45" : true, "KR-46" : true, "KR-47" : true, "KR-48" : true, "KR-49" : true, + "KW-AH" : true, "KW-FA" : true, "KW-HA" : true, "KW-JA" : true, "KW-KU" : true, + "KW-MU" : true, "KZ-AKM" : true, "KZ-AKT" : true, "KZ-ALA" : true, "KZ-ALM" : true, + "KZ-AST" : true, "KZ-ATY" : true, "KZ-KAR" : true, "KZ-KUS" : true, "KZ-KZY" : true, + "KZ-MAN" : true, "KZ-PAV" : true, "KZ-SEV" : true, "KZ-VOS" : true, "KZ-YUZ" : true, + "KZ-ZAP" : true, "KZ-ZHA" : true, "LA-AT" : true, "LA-BK" : true, "LA-BL" : true, + "LA-CH" : true, "LA-HO" : true, "LA-KH" : true, "LA-LM" : true, "LA-LP" : true, + "LA-OU" : true, "LA-PH" : true, "LA-SL" : true, "LA-SV" : true, "LA-VI" : true, + "LA-VT" : true, "LA-XA" : true, "LA-XE" : true, "LA-XI" : true, "LA-XS" : true, + "LB-AK" : true, "LB-AS" : true, "LB-BA" : true, "LB-BH" : true, "LB-BI" : true, + "LB-JA" : true, "LB-JL" : true, "LB-NA" : true, "LI-01" : true, "LI-02" : true, + "LI-03" : true, "LI-04" : true, "LI-05" : true, "LI-06" : true, "LI-07" : true, + "LI-08" : true, "LI-09" : true, "LI-10" : true, "LI-11" : true, "LK-1" : true, + "LK-11" : true, "LK-12" : true, "LK-13" : true, "LK-2" : true, "LK-21" : true, + "LK-22" : true, "LK-23" : true, "LK-3" : true, "LK-31" : true, "LK-32" : true, + "LK-33" : true, "LK-4" : true, "LK-41" : true, "LK-42" : true, "LK-43" : true, + "LK-44" : true, "LK-45" : true, "LK-5" : true, "LK-51" : true, "LK-52" : true, + "LK-53" : true, "LK-6" : true, "LK-61" : true, "LK-62" : true, "LK-7" : true, + "LK-71" : true, "LK-72" : true, "LK-8" : true, "LK-81" : true, "LK-82" : true, + "LK-9" : true, "LK-91" : true, "LK-92" : true, "LR-BG" : true, "LR-BM" : true, + "LR-CM" : true, "LR-GB" : true, "LR-GG" : true, "LR-GK" : true, "LR-LO" : true, + "LR-MG" : true, "LR-MO" : true, "LR-MY" : true, "LR-NI" : true, "LR-RI" : true, + "LR-SI" : true, "LS-A" : true, "LS-B" : true, "LS-C" : true, "LS-D" : true, + "LS-E" : true, "LS-F" : true, "LS-G" : true, "LS-H" : true, "LS-J" : true, + "LS-K" : true, "LT-AL" : true, "LT-KL" : true, "LT-KU" : true, "LT-MR" : true, + "LT-PN" : true, "LT-SA" : true, "LT-TA" : true, "LT-TE" : true, "LT-UT" : true, + "LT-VL" : true, "LU-D" : true, "LU-G" : true, "LU-L" : true, "LV-001" : true, + "LV-002" : true, "LV-003" : true, "LV-004" : true, "LV-005" : true, "LV-006" : true, + "LV-007" : true, "LV-008" : true, "LV-009" : true, "LV-010" : true, "LV-011" : true, + "LV-012" : true, "LV-013" : true, "LV-014" : true, "LV-015" : true, "LV-016" : true, + "LV-017" : true, "LV-018" : true, "LV-019" : true, "LV-020" : true, "LV-021" : true, + "LV-022" : true, "LV-023" : true, "LV-024" : 
true, "LV-025" : true, "LV-026" : true, + "LV-027" : true, "LV-028" : true, "LV-029" : true, "LV-030" : true, "LV-031" : true, + "LV-032" : true, "LV-033" : true, "LV-034" : true, "LV-035" : true, "LV-036" : true, + "LV-037" : true, "LV-038" : true, "LV-039" : true, "LV-040" : true, "LV-041" : true, + "LV-042" : true, "LV-043" : true, "LV-044" : true, "LV-045" : true, "LV-046" : true, + "LV-047" : true, "LV-048" : true, "LV-049" : true, "LV-050" : true, "LV-051" : true, + "LV-052" : true, "LV-053" : true, "LV-054" : true, "LV-055" : true, "LV-056" : true, + "LV-057" : true, "LV-058" : true, "LV-059" : true, "LV-060" : true, "LV-061" : true, + "LV-062" : true, "LV-063" : true, "LV-064" : true, "LV-065" : true, "LV-066" : true, + "LV-067" : true, "LV-068" : true, "LV-069" : true, "LV-070" : true, "LV-071" : true, + "LV-072" : true, "LV-073" : true, "LV-074" : true, "LV-075" : true, "LV-076" : true, + "LV-077" : true, "LV-078" : true, "LV-079" : true, "LV-080" : true, "LV-081" : true, + "LV-082" : true, "LV-083" : true, "LV-084" : true, "LV-085" : true, "LV-086" : true, + "LV-087" : true, "LV-088" : true, "LV-089" : true, "LV-090" : true, "LV-091" : true, + "LV-092" : true, "LV-093" : true, "LV-094" : true, "LV-095" : true, "LV-096" : true, + "LV-097" : true, "LV-098" : true, "LV-099" : true, "LV-100" : true, "LV-101" : true, + "LV-102" : true, "LV-103" : true, "LV-104" : true, "LV-105" : true, "LV-106" : true, + "LV-107" : true, "LV-108" : true, "LV-109" : true, "LV-110" : true, "LV-DGV" : true, + "LV-JEL" : true, "LV-JKB" : true, "LV-JUR" : true, "LV-LPX" : true, "LV-REZ" : true, + "LV-RIX" : true, "LV-VEN" : true, "LV-VMR" : true, "LY-BA" : true, "LY-BU" : true, + "LY-DR" : true, "LY-GT" : true, "LY-JA" : true, "LY-JB" : true, "LY-JG" : true, + "LY-JI" : true, "LY-JU" : true, "LY-KF" : true, "LY-MB" : true, "LY-MI" : true, + "LY-MJ" : true, "LY-MQ" : true, "LY-NL" : true, "LY-NQ" : true, "LY-SB" : true, + "LY-SR" : true, "LY-TB" : true, "LY-WA" : true, "LY-WD" : true, "LY-WS" : true, + "LY-ZA" : true, "MA-01" : true, "MA-02" : true, "MA-03" : true, "MA-04" : true, + "MA-05" : true, "MA-06" : true, "MA-07" : true, "MA-08" : true, "MA-09" : true, + "MA-10" : true, "MA-11" : true, "MA-12" : true, "MA-13" : true, "MA-14" : true, + "MA-15" : true, "MA-16" : true, "MA-AGD" : true, "MA-AOU" : true, "MA-ASZ" : true, + "MA-AZI" : true, "MA-BEM" : true, "MA-BER" : true, "MA-BES" : true, "MA-BOD" : true, + "MA-BOM" : true, "MA-CAS" : true, "MA-CHE" : true, "MA-CHI" : true, "MA-CHT" : true, + "MA-ERR" : true, "MA-ESI" : true, "MA-ESM" : true, "MA-FAH" : true, "MA-FES" : true, + "MA-FIG" : true, "MA-GUE" : true, "MA-HAJ" : true, "MA-HAO" : true, "MA-HOC" : true, + "MA-IFR" : true, "MA-INE" : true, "MA-JDI" : true, "MA-JRA" : true, "MA-KEN" : true, + "MA-KES" : true, "MA-KHE" : true, "MA-KHN" : true, "MA-KHO" : true, "MA-LAA" : true, + "MA-LAR" : true, "MA-MED" : true, "MA-MEK" : true, "MA-MMD" : true, "MA-MMN" : true, + "MA-MOH" : true, "MA-MOU" : true, "MA-NAD" : true, "MA-NOU" : true, "MA-OUA" : true, + "MA-OUD" : true, "MA-OUJ" : true, "MA-RAB" : true, "MA-SAF" : true, "MA-SAL" : true, + "MA-SEF" : true, "MA-SET" : true, "MA-SIK" : true, "MA-SKH" : true, "MA-SYB" : true, + "MA-TAI" : true, "MA-TAO" : true, "MA-TAR" : true, "MA-TAT" : true, "MA-TAZ" : true, + "MA-TET" : true, "MA-TIZ" : true, "MA-TNG" : true, "MA-TNT" : true, "MA-ZAG" : true, + "MC-CL" : true, "MC-CO" : true, "MC-FO" : true, "MC-GA" : true, "MC-JE" : true, + "MC-LA" : true, "MC-MA" : true, "MC-MC" : true, "MC-MG" : true, "MC-MO" : 
true, + "MC-MU" : true, "MC-PH" : true, "MC-SD" : true, "MC-SO" : true, "MC-SP" : true, + "MC-SR" : true, "MC-VR" : true, "MD-AN" : true, "MD-BA" : true, "MD-BD" : true, + "MD-BR" : true, "MD-BS" : true, "MD-CA" : true, "MD-CL" : true, "MD-CM" : true, + "MD-CR" : true, "MD-CS" : true, "MD-CT" : true, "MD-CU" : true, "MD-DO" : true, + "MD-DR" : true, "MD-DU" : true, "MD-ED" : true, "MD-FA" : true, "MD-FL" : true, + "MD-GA" : true, "MD-GL" : true, "MD-HI" : true, "MD-IA" : true, "MD-LE" : true, + "MD-NI" : true, "MD-OC" : true, "MD-OR" : true, "MD-RE" : true, "MD-RI" : true, + "MD-SD" : true, "MD-SI" : true, "MD-SN" : true, "MD-SO" : true, "MD-ST" : true, + "MD-SV" : true, "MD-TA" : true, "MD-TE" : true, "MD-UN" : true, "ME-01" : true, + "ME-02" : true, "ME-03" : true, "ME-04" : true, "ME-05" : true, "ME-06" : true, + "ME-07" : true, "ME-08" : true, "ME-09" : true, "ME-10" : true, "ME-11" : true, + "ME-12" : true, "ME-13" : true, "ME-14" : true, "ME-15" : true, "ME-16" : true, + "ME-17" : true, "ME-18" : true, "ME-19" : true, "ME-20" : true, "ME-21" : true, + "MG-A" : true, "MG-D" : true, "MG-F" : true, "MG-M" : true, "MG-T" : true, + "MG-U" : true, "MH-ALK" : true, "MH-ALL" : true, "MH-ARN" : true, "MH-AUR" : true, + "MH-EBO" : true, "MH-ENI" : true, "MH-JAB" : true, "MH-JAL" : true, "MH-KIL" : true, + "MH-KWA" : true, "MH-L" : true, "MH-LAE" : true, "MH-LIB" : true, "MH-LIK" : true, + "MH-MAJ" : true, "MH-MAL" : true, "MH-MEJ" : true, "MH-MIL" : true, "MH-NMK" : true, + "MH-NMU" : true, "MH-RON" : true, "MH-T" : true, "MH-UJA" : true, "MH-UTI" : true, + "MH-WTJ" : true, "MH-WTN" : true, "MK-01" : true, "MK-02" : true, "MK-03" : true, + "MK-04" : true, "MK-05" : true, "MK-06" : true, "MK-07" : true, "MK-08" : true, + "MK-09" : true, "MK-10" : true, "MK-11" : true, "MK-12" : true, "MK-13" : true, + "MK-14" : true, "MK-15" : true, "MK-16" : true, "MK-17" : true, "MK-18" : true, + "MK-19" : true, "MK-20" : true, "MK-21" : true, "MK-22" : true, "MK-23" : true, + "MK-24" : true, "MK-25" : true, "MK-26" : true, "MK-27" : true, "MK-28" : true, + "MK-29" : true, "MK-30" : true, "MK-31" : true, "MK-32" : true, "MK-33" : true, + "MK-34" : true, "MK-35" : true, "MK-36" : true, "MK-37" : true, "MK-38" : true, + "MK-39" : true, "MK-40" : true, "MK-41" : true, "MK-42" : true, "MK-43" : true, + "MK-44" : true, "MK-45" : true, "MK-46" : true, "MK-47" : true, "MK-48" : true, + "MK-49" : true, "MK-50" : true, "MK-51" : true, "MK-52" : true, "MK-53" : true, + "MK-54" : true, "MK-55" : true, "MK-56" : true, "MK-57" : true, "MK-58" : true, + "MK-59" : true, "MK-60" : true, "MK-61" : true, "MK-62" : true, "MK-63" : true, + "MK-64" : true, "MK-65" : true, "MK-66" : true, "MK-67" : true, "MK-68" : true, + "MK-69" : true, "MK-70" : true, "MK-71" : true, "MK-72" : true, "MK-73" : true, + "MK-74" : true, "MK-75" : true, "MK-76" : true, "MK-77" : true, "MK-78" : true, + "MK-79" : true, "MK-80" : true, "MK-81" : true, "MK-82" : true, "MK-83" : true, + "MK-84" : true, "ML-1" : true, "ML-2" : true, "ML-3" : true, "ML-4" : true, + "ML-5" : true, "ML-6" : true, "ML-7" : true, "ML-8" : true, "ML-BK0" : true, + "MM-01" : true, "MM-02" : true, "MM-03" : true, "MM-04" : true, "MM-05" : true, + "MM-06" : true, "MM-07" : true, "MM-11" : true, "MM-12" : true, "MM-13" : true, + "MM-14" : true, "MM-15" : true, "MM-16" : true, "MM-17" : true, "MN-035" : true, + "MN-037" : true, "MN-039" : true, "MN-041" : true, "MN-043" : true, "MN-046" : true, + "MN-047" : true, "MN-049" : true, "MN-051" : true, "MN-053" : true, "MN-055" : true, + 
"MN-057" : true, "MN-059" : true, "MN-061" : true, "MN-063" : true, "MN-064" : true, + "MN-065" : true, "MN-067" : true, "MN-069" : true, "MN-071" : true, "MN-073" : true, + "MN-1" : true, "MR-01" : true, "MR-02" : true, "MR-03" : true, "MR-04" : true, + "MR-05" : true, "MR-06" : true, "MR-07" : true, "MR-08" : true, "MR-09" : true, + "MR-10" : true, "MR-11" : true, "MR-12" : true, "MR-NKC" : true, "MT-01" : true, + "MT-02" : true, "MT-03" : true, "MT-04" : true, "MT-05" : true, "MT-06" : true, + "MT-07" : true, "MT-08" : true, "MT-09" : true, "MT-10" : true, "MT-11" : true, + "MT-12" : true, "MT-13" : true, "MT-14" : true, "MT-15" : true, "MT-16" : true, + "MT-17" : true, "MT-18" : true, "MT-19" : true, "MT-20" : true, "MT-21" : true, + "MT-22" : true, "MT-23" : true, "MT-24" : true, "MT-25" : true, "MT-26" : true, + "MT-27" : true, "MT-28" : true, "MT-29" : true, "MT-30" : true, "MT-31" : true, + "MT-32" : true, "MT-33" : true, "MT-34" : true, "MT-35" : true, "MT-36" : true, + "MT-37" : true, "MT-38" : true, "MT-39" : true, "MT-40" : true, "MT-41" : true, + "MT-42" : true, "MT-43" : true, "MT-44" : true, "MT-45" : true, "MT-46" : true, + "MT-47" : true, "MT-48" : true, "MT-49" : true, "MT-50" : true, "MT-51" : true, + "MT-52" : true, "MT-53" : true, "MT-54" : true, "MT-55" : true, "MT-56" : true, + "MT-57" : true, "MT-58" : true, "MT-59" : true, "MT-60" : true, "MT-61" : true, + "MT-62" : true, "MT-63" : true, "MT-64" : true, "MT-65" : true, "MT-66" : true, + "MT-67" : true, "MT-68" : true, "MU-AG" : true, "MU-BL" : true, "MU-BR" : true, + "MU-CC" : true, "MU-CU" : true, "MU-FL" : true, "MU-GP" : true, "MU-MO" : true, + "MU-PA" : true, "MU-PL" : true, "MU-PU" : true, "MU-PW" : true, "MU-QB" : true, + "MU-RO" : true, "MU-RP" : true, "MU-SA" : true, "MU-VP" : true, "MV-00" : true, + "MV-01" : true, "MV-02" : true, "MV-03" : true, "MV-04" : true, "MV-05" : true, + "MV-07" : true, "MV-08" : true, "MV-12" : true, "MV-13" : true, "MV-14" : true, + "MV-17" : true, "MV-20" : true, "MV-23" : true, "MV-24" : true, "MV-25" : true, + "MV-26" : true, "MV-27" : true, "MV-28" : true, "MV-29" : true, "MV-CE" : true, + "MV-MLE" : true, "MV-NC" : true, "MV-NO" : true, "MV-SC" : true, "MV-SU" : true, + "MV-UN" : true, "MV-US" : true, "MW-BA" : true, "MW-BL" : true, "MW-C" : true, + "MW-CK" : true, "MW-CR" : true, "MW-CT" : true, "MW-DE" : true, "MW-DO" : true, + "MW-KR" : true, "MW-KS" : true, "MW-LI" : true, "MW-LK" : true, "MW-MC" : true, + "MW-MG" : true, "MW-MH" : true, "MW-MU" : true, "MW-MW" : true, "MW-MZ" : true, + "MW-N" : true, "MW-NB" : true, "MW-NE" : true, "MW-NI" : true, "MW-NK" : true, + "MW-NS" : true, "MW-NU" : true, "MW-PH" : true, "MW-RU" : true, "MW-S" : true, + "MW-SA" : true, "MW-TH" : true, "MW-ZO" : true, "MX-AGU" : true, "MX-BCN" : true, + "MX-BCS" : true, "MX-CAM" : true, "MX-CHH" : true, "MX-CHP" : true, "MX-COA" : true, + "MX-COL" : true, "MX-DIF" : true, "MX-DUR" : true, "MX-GRO" : true, "MX-GUA" : true, + "MX-HID" : true, "MX-JAL" : true, "MX-MEX" : true, "MX-MIC" : true, "MX-MOR" : true, + "MX-NAY" : true, "MX-NLE" : true, "MX-OAX" : true, "MX-PUE" : true, "MX-QUE" : true, + "MX-ROO" : true, "MX-SIN" : true, "MX-SLP" : true, "MX-SON" : true, "MX-TAB" : true, + "MX-TAM" : true, "MX-TLA" : true, "MX-VER" : true, "MX-YUC" : true, "MX-ZAC" : true, + "MY-01" : true, "MY-02" : true, "MY-03" : true, "MY-04" : true, "MY-05" : true, + "MY-06" : true, "MY-07" : true, "MY-08" : true, "MY-09" : true, "MY-10" : true, + "MY-11" : true, "MY-12" : true, "MY-13" : true, "MY-14" : true, 
"MY-15" : true, + "MY-16" : true, "MZ-A" : true, "MZ-B" : true, "MZ-G" : true, "MZ-I" : true, + "MZ-L" : true, "MZ-MPM" : true, "MZ-N" : true, "MZ-P" : true, "MZ-Q" : true, + "MZ-S" : true, "MZ-T" : true, "NA-CA" : true, "NA-ER" : true, "NA-HA" : true, + "NA-KA" : true, "NA-KH" : true, "NA-KU" : true, "NA-OD" : true, "NA-OH" : true, + "NA-OK" : true, "NA-ON" : true, "NA-OS" : true, "NA-OT" : true, "NA-OW" : true, + "NE-1" : true, "NE-2" : true, "NE-3" : true, "NE-4" : true, "NE-5" : true, + "NE-6" : true, "NE-7" : true, "NE-8" : true, "NG-AB" : true, "NG-AD" : true, + "NG-AK" : true, "NG-AN" : true, "NG-BA" : true, "NG-BE" : true, "NG-BO" : true, + "NG-BY" : true, "NG-CR" : true, "NG-DE" : true, "NG-EB" : true, "NG-ED" : true, + "NG-EK" : true, "NG-EN" : true, "NG-FC" : true, "NG-GO" : true, "NG-IM" : true, + "NG-JI" : true, "NG-KD" : true, "NG-KE" : true, "NG-KN" : true, "NG-KO" : true, + "NG-KT" : true, "NG-KW" : true, "NG-LA" : true, "NG-NA" : true, "NG-NI" : true, + "NG-OG" : true, "NG-ON" : true, "NG-OS" : true, "NG-OY" : true, "NG-PL" : true, + "NG-RI" : true, "NG-SO" : true, "NG-TA" : true, "NG-YO" : true, "NG-ZA" : true, + "NI-AN" : true, "NI-AS" : true, "NI-BO" : true, "NI-CA" : true, "NI-CI" : true, + "NI-CO" : true, "NI-ES" : true, "NI-GR" : true, "NI-JI" : true, "NI-LE" : true, + "NI-MD" : true, "NI-MN" : true, "NI-MS" : true, "NI-MT" : true, "NI-NS" : true, + "NI-RI" : true, "NI-SJ" : true, "NL-AW" : true, "NL-BQ1" : true, "NL-BQ2" : true, + "NL-BQ3" : true, "NL-CW" : true, "NL-DR" : true, "NL-FL" : true, "NL-FR" : true, + "NL-GE" : true, "NL-GR" : true, "NL-LI" : true, "NL-NB" : true, "NL-NH" : true, + "NL-OV" : true, "NL-SX" : true, "NL-UT" : true, "NL-ZE" : true, "NL-ZH" : true, + "NO-01" : true, "NO-02" : true, "NO-03" : true, "NO-04" : true, "NO-05" : true, + "NO-06" : true, "NO-07" : true, "NO-08" : true, "NO-09" : true, "NO-10" : true, + "NO-11" : true, "NO-12" : true, "NO-14" : true, "NO-15" : true, "NO-16" : true, + "NO-17" : true, "NO-18" : true, "NO-19" : true, "NO-20" : true, "NO-21" : true, + "NO-22" : true, "NP-1" : true, "NP-2" : true, "NP-3" : true, "NP-4" : true, + "NP-5" : true, "NP-BA" : true, "NP-BH" : true, "NP-DH" : true, "NP-GA" : true, + "NP-JA" : true, "NP-KA" : true, "NP-KO" : true, "NP-LU" : true, "NP-MA" : true, + "NP-ME" : true, "NP-NA" : true, "NP-RA" : true, "NP-SA" : true, "NP-SE" : true, + "NR-01" : true, "NR-02" : true, "NR-03" : true, "NR-04" : true, "NR-05" : true, + "NR-06" : true, "NR-07" : true, "NR-08" : true, "NR-09" : true, "NR-10" : true, + "NR-11" : true, "NR-12" : true, "NR-13" : true, "NR-14" : true, "NZ-AUK" : true, + "NZ-BOP" : true, "NZ-CAN" : true, "NZ-CIT" : true, "NZ-GIS" : true, "NZ-HKB" : true, + "NZ-MBH" : true, "NZ-MWT" : true, "NZ-N" : true, "NZ-NSN" : true, "NZ-NTL" : true, + "NZ-OTA" : true, "NZ-S" : true, "NZ-STL" : true, "NZ-TAS" : true, "NZ-TKI" : true, + "NZ-WGN" : true, "NZ-WKO" : true, "NZ-WTC" : true, "OM-BA" : true, "OM-BU" : true, + "OM-DA" : true, "OM-MA" : true, "OM-MU" : true, "OM-SH" : true, "OM-WU" : true, + "OM-ZA" : true, "OM-ZU" : true, "PA-1" : true, "PA-2" : true, "PA-3" : true, + "PA-4" : true, "PA-5" : true, "PA-6" : true, "PA-7" : true, "PA-8" : true, + "PA-9" : true, "PA-EM" : true, "PA-KY" : true, "PA-NB" : true, "PE-AMA" : true, + "PE-ANC" : true, "PE-APU" : true, "PE-ARE" : true, "PE-AYA" : true, "PE-CAJ" : true, + "PE-CAL" : true, "PE-CUS" : true, "PE-HUC" : true, "PE-HUV" : true, "PE-ICA" : true, + "PE-JUN" : true, "PE-LAL" : true, "PE-LAM" : true, "PE-LIM" : true, "PE-LMA" : true, + 
"PE-LOR" : true, "PE-MDD" : true, "PE-MOQ" : true, "PE-PAS" : true, "PE-PIU" : true, + "PE-PUN" : true, "PE-SAM" : true, "PE-TAC" : true, "PE-TUM" : true, "PE-UCA" : true, + "PG-CPK" : true, "PG-CPM" : true, "PG-EBR" : true, "PG-EHG" : true, "PG-EPW" : true, + "PG-ESW" : true, "PG-GPK" : true, "PG-MBA" : true, "PG-MPL" : true, "PG-MPM" : true, + "PG-MRL" : true, "PG-NCD" : true, "PG-NIK" : true, "PG-NPP" : true, "PG-NSB" : true, + "PG-SAN" : true, "PG-SHM" : true, "PG-WBK" : true, "PG-WHM" : true, "PG-WPD" : true, + "PH-00" : true, "PH-01" : true, "PH-02" : true, "PH-03" : true, "PH-05" : true, + "PH-06" : true, "PH-07" : true, "PH-08" : true, "PH-09" : true, "PH-10" : true, + "PH-11" : true, "PH-12" : true, "PH-13" : true, "PH-14" : true, "PH-15" : true, + "PH-40" : true, "PH-41" : true, "PH-ABR" : true, "PH-AGN" : true, "PH-AGS" : true, + "PH-AKL" : true, "PH-ALB" : true, "PH-ANT" : true, "PH-APA" : true, "PH-AUR" : true, + "PH-BAN" : true, "PH-BAS" : true, "PH-BEN" : true, "PH-BIL" : true, "PH-BOH" : true, + "PH-BTG" : true, "PH-BTN" : true, "PH-BUK" : true, "PH-BUL" : true, "PH-CAG" : true, + "PH-CAM" : true, "PH-CAN" : true, "PH-CAP" : true, "PH-CAS" : true, "PH-CAT" : true, + "PH-CAV" : true, "PH-CEB" : true, "PH-COM" : true, "PH-DAO" : true, "PH-DAS" : true, + "PH-DAV" : true, "PH-DIN" : true, "PH-EAS" : true, "PH-GUI" : true, "PH-IFU" : true, + "PH-ILI" : true, "PH-ILN" : true, "PH-ILS" : true, "PH-ISA" : true, "PH-KAL" : true, + "PH-LAG" : true, "PH-LAN" : true, "PH-LAS" : true, "PH-LEY" : true, "PH-LUN" : true, + "PH-MAD" : true, "PH-MAG" : true, "PH-MAS" : true, "PH-MDC" : true, "PH-MDR" : true, + "PH-MOU" : true, "PH-MSC" : true, "PH-MSR" : true, "PH-NCO" : true, "PH-NEC" : true, + "PH-NER" : true, "PH-NSA" : true, "PH-NUE" : true, "PH-NUV" : true, "PH-PAM" : true, + "PH-PAN" : true, "PH-PLW" : true, "PH-QUE" : true, "PH-QUI" : true, "PH-RIZ" : true, + "PH-ROM" : true, "PH-SAR" : true, "PH-SCO" : true, "PH-SIG" : true, "PH-SLE" : true, + "PH-SLU" : true, "PH-SOR" : true, "PH-SUK" : true, "PH-SUN" : true, "PH-SUR" : true, + "PH-TAR" : true, "PH-TAW" : true, "PH-WSA" : true, "PH-ZAN" : true, "PH-ZAS" : true, + "PH-ZMB" : true, "PH-ZSI" : true, "PK-BA" : true, "PK-GB" : true, "PK-IS" : true, + "PK-JK" : true, "PK-KP" : true, "PK-PB" : true, "PK-SD" : true, "PK-TA" : true, + "PL-DS" : true, "PL-KP" : true, "PL-LB" : true, "PL-LD" : true, "PL-LU" : true, + "PL-MA" : true, "PL-MZ" : true, "PL-OP" : true, "PL-PD" : true, "PL-PK" : true, + "PL-PM" : true, "PL-SK" : true, "PL-SL" : true, "PL-WN" : true, "PL-WP" : true, + "PL-ZP" : true, "PS-BTH" : true, "PS-DEB" : true, "PS-GZA" : true, "PS-HBN" : true, + "PS-JEM" : true, "PS-JEN" : true, "PS-JRH" : true, "PS-KYS" : true, "PS-NBS" : true, + "PS-NGZ" : true, "PS-QQA" : true, "PS-RBH" : true, "PS-RFH" : true, "PS-SLT" : true, + "PS-TBS" : true, "PS-TKM" : true, "PT-01" : true, "PT-02" : true, "PT-03" : true, + "PT-04" : true, "PT-05" : true, "PT-06" : true, "PT-07" : true, "PT-08" : true, + "PT-09" : true, "PT-10" : true, "PT-11" : true, "PT-12" : true, "PT-13" : true, + "PT-14" : true, "PT-15" : true, "PT-16" : true, "PT-17" : true, "PT-18" : true, + "PT-20" : true, "PT-30" : true, "PW-002" : true, "PW-004" : true, "PW-010" : true, + "PW-050" : true, "PW-100" : true, "PW-150" : true, "PW-212" : true, "PW-214" : true, + "PW-218" : true, "PW-222" : true, "PW-224" : true, "PW-226" : true, "PW-227" : true, + "PW-228" : true, "PW-350" : true, "PW-370" : true, "PY-1" : true, "PY-10" : true, + "PY-11" : true, "PY-12" : true, "PY-13" : true, 
"PY-14" : true, "PY-15" : true, + "PY-16" : true, "PY-19" : true, "PY-2" : true, "PY-3" : true, "PY-4" : true, + "PY-5" : true, "PY-6" : true, "PY-7" : true, "PY-8" : true, "PY-9" : true, + "PY-ASU" : true, "QA-DA" : true, "QA-KH" : true, "QA-MS" : true, "QA-RA" : true, + "QA-US" : true, "QA-WA" : true, "QA-ZA" : true, "RO-AB" : true, "RO-AG" : true, + "RO-AR" : true, "RO-B" : true, "RO-BC" : true, "RO-BH" : true, "RO-BN" : true, + "RO-BR" : true, "RO-BT" : true, "RO-BV" : true, "RO-BZ" : true, "RO-CJ" : true, + "RO-CL" : true, "RO-CS" : true, "RO-CT" : true, "RO-CV" : true, "RO-DB" : true, + "RO-DJ" : true, "RO-GJ" : true, "RO-GL" : true, "RO-GR" : true, "RO-HD" : true, + "RO-HR" : true, "RO-IF" : true, "RO-IL" : true, "RO-IS" : true, "RO-MH" : true, + "RO-MM" : true, "RO-MS" : true, "RO-NT" : true, "RO-OT" : true, "RO-PH" : true, + "RO-SB" : true, "RO-SJ" : true, "RO-SM" : true, "RO-SV" : true, "RO-TL" : true, + "RO-TM" : true, "RO-TR" : true, "RO-VL" : true, "RO-VN" : true, "RO-VS" : true, + "RS-00" : true, "RS-01" : true, "RS-02" : true, "RS-03" : true, "RS-04" : true, + "RS-05" : true, "RS-06" : true, "RS-07" : true, "RS-08" : true, "RS-09" : true, + "RS-10" : true, "RS-11" : true, "RS-12" : true, "RS-13" : true, "RS-14" : true, + "RS-15" : true, "RS-16" : true, "RS-17" : true, "RS-18" : true, "RS-19" : true, + "RS-20" : true, "RS-21" : true, "RS-22" : true, "RS-23" : true, "RS-24" : true, + "RS-25" : true, "RS-26" : true, "RS-27" : true, "RS-28" : true, "RS-29" : true, + "RS-KM" : true, "RS-VO" : true, "RU-AD" : true, "RU-AL" : true, "RU-ALT" : true, + "RU-AMU" : true, "RU-ARK" : true, "RU-AST" : true, "RU-BA" : true, "RU-BEL" : true, + "RU-BRY" : true, "RU-BU" : true, "RU-CE" : true, "RU-CHE" : true, "RU-CHU" : true, + "RU-CU" : true, "RU-DA" : true, "RU-IN" : true, "RU-IRK" : true, "RU-IVA" : true, + "RU-KAM" : true, "RU-KB" : true, "RU-KC" : true, "RU-KDA" : true, "RU-KEM" : true, + "RU-KGD" : true, "RU-KGN" : true, "RU-KHA" : true, "RU-KHM" : true, "RU-KIR" : true, + "RU-KK" : true, "RU-KL" : true, "RU-KLU" : true, "RU-KO" : true, "RU-KOS" : true, + "RU-KR" : true, "RU-KRS" : true, "RU-KYA" : true, "RU-LEN" : true, "RU-LIP" : true, + "RU-MAG" : true, "RU-ME" : true, "RU-MO" : true, "RU-MOS" : true, "RU-MOW" : true, + "RU-MUR" : true, "RU-NEN" : true, "RU-NGR" : true, "RU-NIZ" : true, "RU-NVS" : true, + "RU-OMS" : true, "RU-ORE" : true, "RU-ORL" : true, "RU-PER" : true, "RU-PNZ" : true, + "RU-PRI" : true, "RU-PSK" : true, "RU-ROS" : true, "RU-RYA" : true, "RU-SA" : true, + "RU-SAK" : true, "RU-SAM" : true, "RU-SAR" : true, "RU-SE" : true, "RU-SMO" : true, + "RU-SPE" : true, "RU-STA" : true, "RU-SVE" : true, "RU-TA" : true, "RU-TAM" : true, + "RU-TOM" : true, "RU-TUL" : true, "RU-TVE" : true, "RU-TY" : true, "RU-TYU" : true, + "RU-UD" : true, "RU-ULY" : true, "RU-VGG" : true, "RU-VLA" : true, "RU-VLG" : true, + "RU-VOR" : true, "RU-YAN" : true, "RU-YAR" : true, "RU-YEV" : true, "RU-ZAB" : true, + "RW-01" : true, "RW-02" : true, "RW-03" : true, "RW-04" : true, "RW-05" : true, + "SA-01" : true, "SA-02" : true, "SA-03" : true, "SA-04" : true, "SA-05" : true, + "SA-06" : true, "SA-07" : true, "SA-08" : true, "SA-09" : true, "SA-10" : true, + "SA-11" : true, "SA-12" : true, "SA-14" : true, "SB-CE" : true, "SB-CH" : true, + "SB-CT" : true, "SB-GU" : true, "SB-IS" : true, "SB-MK" : true, "SB-ML" : true, + "SB-RB" : true, "SB-TE" : true, "SB-WE" : true, "SC-01" : true, "SC-02" : true, + "SC-03" : true, "SC-04" : true, "SC-05" : true, "SC-06" : true, "SC-07" : true, + "SC-08" : true, 
"SC-09" : true, "SC-10" : true, "SC-11" : true, "SC-12" : true, + "SC-13" : true, "SC-14" : true, "SC-15" : true, "SC-16" : true, "SC-17" : true, + "SC-18" : true, "SC-19" : true, "SC-20" : true, "SC-21" : true, "SC-22" : true, + "SC-23" : true, "SC-24" : true, "SC-25" : true, "SD-DC" : true, "SD-DE" : true, + "SD-DN" : true, "SD-DS" : true, "SD-DW" : true, "SD-GD" : true, "SD-GZ" : true, + "SD-KA" : true, "SD-KH" : true, "SD-KN" : true, "SD-KS" : true, "SD-NB" : true, + "SD-NO" : true, "SD-NR" : true, "SD-NW" : true, "SD-RS" : true, "SD-SI" : true, + "SE-AB" : true, "SE-AC" : true, "SE-BD" : true, "SE-C" : true, "SE-D" : true, + "SE-E" : true, "SE-F" : true, "SE-G" : true, "SE-H" : true, "SE-I" : true, + "SE-K" : true, "SE-M" : true, "SE-N" : true, "SE-O" : true, "SE-S" : true, + "SE-T" : true, "SE-U" : true, "SE-W" : true, "SE-X" : true, "SE-Y" : true, + "SE-Z" : true, "SG-01" : true, "SG-02" : true, "SG-03" : true, "SG-04" : true, + "SG-05" : true, "SH-AC" : true, "SH-HL" : true, "SH-TA" : true, "SI-001" : true, + "SI-002" : true, "SI-003" : true, "SI-004" : true, "SI-005" : true, "SI-006" : true, + "SI-007" : true, "SI-008" : true, "SI-009" : true, "SI-010" : true, "SI-011" : true, + "SI-012" : true, "SI-013" : true, "SI-014" : true, "SI-015" : true, "SI-016" : true, + "SI-017" : true, "SI-018" : true, "SI-019" : true, "SI-020" : true, "SI-021" : true, + "SI-022" : true, "SI-023" : true, "SI-024" : true, "SI-025" : true, "SI-026" : true, + "SI-027" : true, "SI-028" : true, "SI-029" : true, "SI-030" : true, "SI-031" : true, + "SI-032" : true, "SI-033" : true, "SI-034" : true, "SI-035" : true, "SI-036" : true, + "SI-037" : true, "SI-038" : true, "SI-039" : true, "SI-040" : true, "SI-041" : true, + "SI-042" : true, "SI-043" : true, "SI-044" : true, "SI-045" : true, "SI-046" : true, + "SI-047" : true, "SI-048" : true, "SI-049" : true, "SI-050" : true, "SI-051" : true, + "SI-052" : true, "SI-053" : true, "SI-054" : true, "SI-055" : true, "SI-056" : true, + "SI-057" : true, "SI-058" : true, "SI-059" : true, "SI-060" : true, "SI-061" : true, + "SI-062" : true, "SI-063" : true, "SI-064" : true, "SI-065" : true, "SI-066" : true, + "SI-067" : true, "SI-068" : true, "SI-069" : true, "SI-070" : true, "SI-071" : true, + "SI-072" : true, "SI-073" : true, "SI-074" : true, "SI-075" : true, "SI-076" : true, + "SI-077" : true, "SI-078" : true, "SI-079" : true, "SI-080" : true, "SI-081" : true, + "SI-082" : true, "SI-083" : true, "SI-084" : true, "SI-085" : true, "SI-086" : true, + "SI-087" : true, "SI-088" : true, "SI-089" : true, "SI-090" : true, "SI-091" : true, + "SI-092" : true, "SI-093" : true, "SI-094" : true, "SI-095" : true, "SI-096" : true, + "SI-097" : true, "SI-098" : true, "SI-099" : true, "SI-100" : true, "SI-101" : true, + "SI-102" : true, "SI-103" : true, "SI-104" : true, "SI-105" : true, "SI-106" : true, + "SI-107" : true, "SI-108" : true, "SI-109" : true, "SI-110" : true, "SI-111" : true, + "SI-112" : true, "SI-113" : true, "SI-114" : true, "SI-115" : true, "SI-116" : true, + "SI-117" : true, "SI-118" : true, "SI-119" : true, "SI-120" : true, "SI-121" : true, + "SI-122" : true, "SI-123" : true, "SI-124" : true, "SI-125" : true, "SI-126" : true, + "SI-127" : true, "SI-128" : true, "SI-129" : true, "SI-130" : true, "SI-131" : true, + "SI-132" : true, "SI-133" : true, "SI-134" : true, "SI-135" : true, "SI-136" : true, + "SI-137" : true, "SI-138" : true, "SI-139" : true, "SI-140" : true, "SI-141" : true, + "SI-142" : true, "SI-143" : true, "SI-144" : true, "SI-146" : true, "SI-147" : true, 
+ "SI-148" : true, "SI-149" : true, "SI-150" : true, "SI-151" : true, "SI-152" : true, + "SI-153" : true, "SI-154" : true, "SI-155" : true, "SI-156" : true, "SI-157" : true, + "SI-158" : true, "SI-159" : true, "SI-160" : true, "SI-161" : true, "SI-162" : true, + "SI-163" : true, "SI-164" : true, "SI-165" : true, "SI-166" : true, "SI-167" : true, + "SI-168" : true, "SI-169" : true, "SI-170" : true, "SI-171" : true, "SI-172" : true, + "SI-173" : true, "SI-174" : true, "SI-175" : true, "SI-176" : true, "SI-177" : true, + "SI-178" : true, "SI-179" : true, "SI-180" : true, "SI-181" : true, "SI-182" : true, + "SI-183" : true, "SI-184" : true, "SI-185" : true, "SI-186" : true, "SI-187" : true, + "SI-188" : true, "SI-189" : true, "SI-190" : true, "SI-191" : true, "SI-192" : true, + "SI-193" : true, "SI-194" : true, "SI-195" : true, "SI-196" : true, "SI-197" : true, + "SI-198" : true, "SI-199" : true, "SI-200" : true, "SI-201" : true, "SI-202" : true, + "SI-203" : true, "SI-204" : true, "SI-205" : true, "SI-206" : true, "SI-207" : true, + "SI-208" : true, "SI-209" : true, "SI-210" : true, "SI-211" : true, "SK-BC" : true, + "SK-BL" : true, "SK-KI" : true, "SK-NI" : true, "SK-PV" : true, "SK-TA" : true, + "SK-TC" : true, "SK-ZI" : true, "SL-E" : true, "SL-N" : true, "SL-S" : true, + "SL-W" : true, "SM-01" : true, "SM-02" : true, "SM-03" : true, "SM-04" : true, + "SM-05" : true, "SM-06" : true, "SM-07" : true, "SM-08" : true, "SM-09" : true, + "SN-DB" : true, "SN-DK" : true, "SN-FK" : true, "SN-KA" : true, "SN-KD" : true, + "SN-KE" : true, "SN-KL" : true, "SN-LG" : true, "SN-MT" : true, "SN-SE" : true, + "SN-SL" : true, "SN-TC" : true, "SN-TH" : true, "SN-ZG" : true, "SO-AW" : true, + "SO-BK" : true, "SO-BN" : true, "SO-BR" : true, "SO-BY" : true, "SO-GA" : true, + "SO-GE" : true, "SO-HI" : true, "SO-JD" : true, "SO-JH" : true, "SO-MU" : true, + "SO-NU" : true, "SO-SA" : true, "SO-SD" : true, "SO-SH" : true, "SO-SO" : true, + "SO-TO" : true, "SO-WO" : true, "SR-BR" : true, "SR-CM" : true, "SR-CR" : true, + "SR-MA" : true, "SR-NI" : true, "SR-PM" : true, "SR-PR" : true, "SR-SA" : true, + "SR-SI" : true, "SR-WA" : true, "SS-BN" : true, "SS-BW" : true, "SS-EC" : true, + "SS-EE8" : true, "SS-EW" : true, "SS-JG" : true, "SS-LK" : true, "SS-NU" : true, + "SS-UY" : true, "SS-WR" : true, "ST-P" : true, "ST-S" : true, "SV-AH" : true, + "SV-CA" : true, "SV-CH" : true, "SV-CU" : true, "SV-LI" : true, "SV-MO" : true, + "SV-PA" : true, "SV-SA" : true, "SV-SM" : true, "SV-SO" : true, "SV-SS" : true, + "SV-SV" : true, "SV-UN" : true, "SV-US" : true, "SY-DI" : true, "SY-DR" : true, + "SY-DY" : true, "SY-HA" : true, "SY-HI" : true, "SY-HL" : true, "SY-HM" : true, + "SY-ID" : true, "SY-LA" : true, "SY-QU" : true, "SY-RA" : true, "SY-RD" : true, + "SY-SU" : true, "SY-TA" : true, "SZ-HH" : true, "SZ-LU" : true, "SZ-MA" : true, + "SZ-SH" : true, "TD-BA" : true, "TD-BG" : true, "TD-BO" : true, "TD-CB" : true, + "TD-EN" : true, "TD-GR" : true, "TD-HL" : true, "TD-KA" : true, "TD-LC" : true, + "TD-LO" : true, "TD-LR" : true, "TD-MA" : true, "TD-MC" : true, "TD-ME" : true, + "TD-MO" : true, "TD-ND" : true, "TD-OD" : true, "TD-SA" : true, "TD-SI" : true, + "TD-TA" : true, "TD-TI" : true, "TD-WF" : true, "TG-C" : true, "TG-K" : true, + "TG-M" : true, "TG-P" : true, "TG-S" : true, "TH-10" : true, "TH-11" : true, + "TH-12" : true, "TH-13" : true, "TH-14" : true, "TH-15" : true, "TH-16" : true, + "TH-17" : true, "TH-18" : true, "TH-19" : true, "TH-20" : true, "TH-21" : true, + "TH-22" : true, "TH-23" : true, "TH-24" : true, 
"TH-25" : true, "TH-26" : true, + "TH-27" : true, "TH-30" : true, "TH-31" : true, "TH-32" : true, "TH-33" : true, + "TH-34" : true, "TH-35" : true, "TH-36" : true, "TH-37" : true, "TH-39" : true, + "TH-40" : true, "TH-41" : true, "TH-42" : true, "TH-43" : true, "TH-44" : true, + "TH-45" : true, "TH-46" : true, "TH-47" : true, "TH-48" : true, "TH-49" : true, + "TH-50" : true, "TH-51" : true, "TH-52" : true, "TH-53" : true, "TH-54" : true, + "TH-55" : true, "TH-56" : true, "TH-57" : true, "TH-58" : true, "TH-60" : true, + "TH-61" : true, "TH-62" : true, "TH-63" : true, "TH-64" : true, "TH-65" : true, + "TH-66" : true, "TH-67" : true, "TH-70" : true, "TH-71" : true, "TH-72" : true, + "TH-73" : true, "TH-74" : true, "TH-75" : true, "TH-76" : true, "TH-77" : true, + "TH-80" : true, "TH-81" : true, "TH-82" : true, "TH-83" : true, "TH-84" : true, + "TH-85" : true, "TH-86" : true, "TH-90" : true, "TH-91" : true, "TH-92" : true, + "TH-93" : true, "TH-94" : true, "TH-95" : true, "TH-96" : true, "TH-S" : true, + "TJ-GB" : true, "TJ-KT" : true, "TJ-SU" : true, "TL-AL" : true, "TL-AN" : true, + "TL-BA" : true, "TL-BO" : true, "TL-CO" : true, "TL-DI" : true, "TL-ER" : true, + "TL-LA" : true, "TL-LI" : true, "TL-MF" : true, "TL-MT" : true, "TL-OE" : true, + "TL-VI" : true, "TM-A" : true, "TM-B" : true, "TM-D" : true, "TM-L" : true, + "TM-M" : true, "TM-S" : true, "TN-11" : true, "TN-12" : true, "TN-13" : true, + "TN-14" : true, "TN-21" : true, "TN-22" : true, "TN-23" : true, "TN-31" : true, + "TN-32" : true, "TN-33" : true, "TN-34" : true, "TN-41" : true, "TN-42" : true, + "TN-43" : true, "TN-51" : true, "TN-52" : true, "TN-53" : true, "TN-61" : true, + "TN-71" : true, "TN-72" : true, "TN-73" : true, "TN-81" : true, "TN-82" : true, + "TN-83" : true, "TO-01" : true, "TO-02" : true, "TO-03" : true, "TO-04" : true, + "TO-05" : true, "TR-01" : true, "TR-02" : true, "TR-03" : true, "TR-04" : true, + "TR-05" : true, "TR-06" : true, "TR-07" : true, "TR-08" : true, "TR-09" : true, + "TR-10" : true, "TR-11" : true, "TR-12" : true, "TR-13" : true, "TR-14" : true, + "TR-15" : true, "TR-16" : true, "TR-17" : true, "TR-18" : true, "TR-19" : true, + "TR-20" : true, "TR-21" : true, "TR-22" : true, "TR-23" : true, "TR-24" : true, + "TR-25" : true, "TR-26" : true, "TR-27" : true, "TR-28" : true, "TR-29" : true, + "TR-30" : true, "TR-31" : true, "TR-32" : true, "TR-33" : true, "TR-34" : true, + "TR-35" : true, "TR-36" : true, "TR-37" : true, "TR-38" : true, "TR-39" : true, + "TR-40" : true, "TR-41" : true, "TR-42" : true, "TR-43" : true, "TR-44" : true, + "TR-45" : true, "TR-46" : true, "TR-47" : true, "TR-48" : true, "TR-49" : true, + "TR-50" : true, "TR-51" : true, "TR-52" : true, "TR-53" : true, "TR-54" : true, + "TR-55" : true, "TR-56" : true, "TR-57" : true, "TR-58" : true, "TR-59" : true, + "TR-60" : true, "TR-61" : true, "TR-62" : true, "TR-63" : true, "TR-64" : true, + "TR-65" : true, "TR-66" : true, "TR-67" : true, "TR-68" : true, "TR-69" : true, + "TR-70" : true, "TR-71" : true, "TR-72" : true, "TR-73" : true, "TR-74" : true, + "TR-75" : true, "TR-76" : true, "TR-77" : true, "TR-78" : true, "TR-79" : true, + "TR-80" : true, "TR-81" : true, "TT-ARI" : true, "TT-CHA" : true, "TT-CTT" : true, + "TT-DMN" : true, "TT-ETO" : true, "TT-PED" : true, "TT-POS" : true, "TT-PRT" : true, + "TT-PTF" : true, "TT-RCM" : true, "TT-SFO" : true, "TT-SGE" : true, "TT-SIP" : true, + "TT-SJL" : true, "TT-TUP" : true, "TT-WTO" : true, "TV-FUN" : true, "TV-NIT" : true, + "TV-NKF" : true, "TV-NKL" : true, "TV-NMA" : true, "TV-NMG" : 
true, "TV-NUI" : true, + "TV-VAI" : true, "TW-CHA" : true, "TW-CYI" : true, "TW-CYQ" : true, "TW-HSQ" : true, + "TW-HSZ" : true, "TW-HUA" : true, "TW-ILA" : true, "TW-KEE" : true, "TW-KHH" : true, + "TW-KHQ" : true, "TW-MIA" : true, "TW-NAN" : true, "TW-PEN" : true, "TW-PIF" : true, + "TW-TAO" : true, "TW-TNN" : true, "TW-TNQ" : true, "TW-TPE" : true, "TW-TPQ" : true, + "TW-TTT" : true, "TW-TXG" : true, "TW-TXQ" : true, "TW-YUN" : true, "TZ-01" : true, + "TZ-02" : true, "TZ-03" : true, "TZ-04" : true, "TZ-05" : true, "TZ-06" : true, + "TZ-07" : true, "TZ-08" : true, "TZ-09" : true, "TZ-10" : true, "TZ-11" : true, + "TZ-12" : true, "TZ-13" : true, "TZ-14" : true, "TZ-15" : true, "TZ-16" : true, + "TZ-17" : true, "TZ-18" : true, "TZ-19" : true, "TZ-20" : true, "TZ-21" : true, + "TZ-22" : true, "TZ-23" : true, "TZ-24" : true, "TZ-25" : true, "TZ-26" : true, + "UA-05" : true, "UA-07" : true, "UA-09" : true, "UA-12" : true, "UA-14" : true, + "UA-18" : true, "UA-21" : true, "UA-23" : true, "UA-26" : true, "UA-30" : true, + "UA-32" : true, "UA-35" : true, "UA-40" : true, "UA-43" : true, "UA-46" : true, + "UA-48" : true, "UA-51" : true, "UA-53" : true, "UA-56" : true, "UA-59" : true, + "UA-61" : true, "UA-63" : true, "UA-65" : true, "UA-68" : true, "UA-71" : true, + "UA-74" : true, "UA-77" : true, "UG-101" : true, "UG-102" : true, "UG-103" : true, + "UG-104" : true, "UG-105" : true, "UG-106" : true, "UG-107" : true, "UG-108" : true, + "UG-109" : true, "UG-110" : true, "UG-111" : true, "UG-112" : true, "UG-113" : true, + "UG-114" : true, "UG-115" : true, "UG-116" : true, "UG-201" : true, "UG-202" : true, + "UG-203" : true, "UG-204" : true, "UG-205" : true, "UG-206" : true, "UG-207" : true, + "UG-208" : true, "UG-209" : true, "UG-210" : true, "UG-211" : true, "UG-212" : true, + "UG-213" : true, "UG-214" : true, "UG-215" : true, "UG-216" : true, "UG-217" : true, + "UG-218" : true, "UG-219" : true, "UG-220" : true, "UG-221" : true, "UG-222" : true, + "UG-223" : true, "UG-224" : true, "UG-301" : true, "UG-302" : true, "UG-303" : true, + "UG-304" : true, "UG-305" : true, "UG-306" : true, "UG-307" : true, "UG-308" : true, + "UG-309" : true, "UG-310" : true, "UG-311" : true, "UG-312" : true, "UG-313" : true, + "UG-314" : true, "UG-315" : true, "UG-316" : true, "UG-317" : true, "UG-318" : true, + "UG-319" : true, "UG-320" : true, "UG-321" : true, "UG-401" : true, "UG-402" : true, + "UG-403" : true, "UG-404" : true, "UG-405" : true, "UG-406" : true, "UG-407" : true, + "UG-408" : true, "UG-409" : true, "UG-410" : true, "UG-411" : true, "UG-412" : true, + "UG-413" : true, "UG-414" : true, "UG-415" : true, "UG-416" : true, "UG-417" : true, + "UG-418" : true, "UG-419" : true, "UG-C" : true, "UG-E" : true, "UG-N" : true, + "UG-W" : true, "UM-67" : true, "UM-71" : true, "UM-76" : true, "UM-79" : true, + "UM-81" : true, "UM-84" : true, "UM-86" : true, "UM-89" : true, "UM-95" : true, + "US-AK" : true, "US-AL" : true, "US-AR" : true, "US-AS" : true, "US-AZ" : true, + "US-CA" : true, "US-CO" : true, "US-CT" : true, "US-DC" : true, "US-DE" : true, + "US-FL" : true, "US-GA" : true, "US-GU" : true, "US-HI" : true, "US-IA" : true, + "US-ID" : true, "US-IL" : true, "US-IN" : true, "US-KS" : true, "US-KY" : true, + "US-LA" : true, "US-MA" : true, "US-MD" : true, "US-ME" : true, "US-MI" : true, + "US-MN" : true, "US-MO" : true, "US-MP" : true, "US-MS" : true, "US-MT" : true, + "US-NC" : true, "US-ND" : true, "US-NE" : true, "US-NH" : true, "US-NJ" : true, + "US-NM" : true, "US-NV" : true, "US-NY" : true, "US-OH" : true, 
"US-OK" : true, + "US-OR" : true, "US-PA" : true, "US-PR" : true, "US-RI" : true, "US-SC" : true, + "US-SD" : true, "US-TN" : true, "US-TX" : true, "US-UM" : true, "US-UT" : true, + "US-VA" : true, "US-VI" : true, "US-VT" : true, "US-WA" : true, "US-WI" : true, + "US-WV" : true, "US-WY" : true, "UY-AR" : true, "UY-CA" : true, "UY-CL" : true, + "UY-CO" : true, "UY-DU" : true, "UY-FD" : true, "UY-FS" : true, "UY-LA" : true, + "UY-MA" : true, "UY-MO" : true, "UY-PA" : true, "UY-RN" : true, "UY-RO" : true, + "UY-RV" : true, "UY-SA" : true, "UY-SJ" : true, "UY-SO" : true, "UY-TA" : true, + "UY-TT" : true, "UZ-AN" : true, "UZ-BU" : true, "UZ-FA" : true, "UZ-JI" : true, + "UZ-NG" : true, "UZ-NW" : true, "UZ-QA" : true, "UZ-QR" : true, "UZ-SA" : true, + "UZ-SI" : true, "UZ-SU" : true, "UZ-TK" : true, "UZ-TO" : true, "UZ-XO" : true, + "VC-01" : true, "VC-02" : true, "VC-03" : true, "VC-04" : true, "VC-05" : true, + "VC-06" : true, "VE-A" : true, "VE-B" : true, "VE-C" : true, "VE-D" : true, + "VE-E" : true, "VE-F" : true, "VE-G" : true, "VE-H" : true, "VE-I" : true, + "VE-J" : true, "VE-K" : true, "VE-L" : true, "VE-M" : true, "VE-N" : true, + "VE-O" : true, "VE-P" : true, "VE-R" : true, "VE-S" : true, "VE-T" : true, + "VE-U" : true, "VE-V" : true, "VE-W" : true, "VE-X" : true, "VE-Y" : true, + "VE-Z" : true, "VN-01" : true, "VN-02" : true, "VN-03" : true, "VN-04" : true, + "VN-05" : true, "VN-06" : true, "VN-07" : true, "VN-09" : true, "VN-13" : true, + "VN-14" : true, "VN-15" : true, "VN-18" : true, "VN-20" : true, "VN-21" : true, + "VN-22" : true, "VN-23" : true, "VN-24" : true, "VN-25" : true, "VN-26" : true, + "VN-27" : true, "VN-28" : true, "VN-29" : true, "VN-30" : true, "VN-31" : true, + "VN-32" : true, "VN-33" : true, "VN-34" : true, "VN-35" : true, "VN-36" : true, + "VN-37" : true, "VN-39" : true, "VN-40" : true, "VN-41" : true, "VN-43" : true, + "VN-44" : true, "VN-45" : true, "VN-46" : true, "VN-47" : true, "VN-49" : true, + "VN-50" : true, "VN-51" : true, "VN-52" : true, "VN-53" : true, "VN-54" : true, + "VN-55" : true, "VN-56" : true, "VN-57" : true, "VN-58" : true, "VN-59" : true, + "VN-61" : true, "VN-63" : true, "VN-66" : true, "VN-67" : true, "VN-68" : true, + "VN-69" : true, "VN-70" : true, "VN-71" : true, "VN-72" : true, "VN-73" : true, + "VN-CT" : true, "VN-DN" : true, "VN-HN" : true, "VN-HP" : true, "VN-SG" : true, + "VU-MAP" : true, "VU-PAM" : true, "VU-SAM" : true, "VU-SEE" : true, "VU-TAE" : true, + "VU-TOB" : true, "WS-AA" : true, "WS-AL" : true, "WS-AT" : true, "WS-FA" : true, + "WS-GE" : true, "WS-GI" : true, "WS-PA" : true, "WS-SA" : true, "WS-TU" : true, + "WS-VF" : true, "WS-VS" : true, "YE-AB" : true, "YE-AD" : true, "YE-AM" : true, + "YE-BA" : true, "YE-DA" : true, "YE-DH" : true, "YE-HD" : true, "YE-HJ" : true, + "YE-IB" : true, "YE-JA" : true, "YE-LA" : true, "YE-MA" : true, "YE-MR" : true, + "YE-MU" : true, "YE-MW" : true, "YE-RA" : true, "YE-SD" : true, "YE-SH" : true, + "YE-SN" : true, "YE-TA" : true, "ZA-EC" : true, "ZA-FS" : true, "ZA-GP" : true, + "ZA-LP" : true, "ZA-MP" : true, "ZA-NC" : true, "ZA-NW" : true, "ZA-WC" : true, + "ZA-ZN" : true, "ZM-01" : true, "ZM-02" : true, "ZM-03" : true, "ZM-04" : true, + "ZM-05" : true, "ZM-06" : true, "ZM-07" : true, "ZM-08" : true, "ZM-09" : true, + "ZW-BU" : true, "ZW-HA" : true, "ZW-MA" : true, "ZW-MC" : true, "ZW-ME" : true, + "ZW-MI" : true, "ZW-MN" : true, "ZW-MS" : true, "ZW-MV" : true, "ZW-MW" : true, +} diff --git a/vendor/github.com/go-playground/validator/v10/currency_codes.go 
b/vendor/github.com/go-playground/validator/v10/currency_codes.go new file mode 100644 index 00000000..a5cd9b18 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/currency_codes.go @@ -0,0 +1,79 @@ +package validator + +var iso4217 = map[string]bool{ + "AFN": true, "EUR": true, "ALL": true, "DZD": true, "USD": true, + "AOA": true, "XCD": true, "ARS": true, "AMD": true, "AWG": true, + "AUD": true, "AZN": true, "BSD": true, "BHD": true, "BDT": true, + "BBD": true, "BYN": true, "BZD": true, "XOF": true, "BMD": true, + "INR": true, "BTN": true, "BOB": true, "BOV": true, "BAM": true, + "BWP": true, "NOK": true, "BRL": true, "BND": true, "BGN": true, + "BIF": true, "CVE": true, "KHR": true, "XAF": true, "CAD": true, + "KYD": true, "CLP": true, "CLF": true, "CNY": true, "COP": true, + "COU": true, "KMF": true, "CDF": true, "NZD": true, "CRC": true, + "HRK": true, "CUP": true, "CUC": true, "ANG": true, "CZK": true, + "DKK": true, "DJF": true, "DOP": true, "EGP": true, "SVC": true, + "ERN": true, "SZL": true, "ETB": true, "FKP": true, "FJD": true, + "XPF": true, "GMD": true, "GEL": true, "GHS": true, "GIP": true, + "GTQ": true, "GBP": true, "GNF": true, "GYD": true, "HTG": true, + "HNL": true, "HKD": true, "HUF": true, "ISK": true, "IDR": true, + "XDR": true, "IRR": true, "IQD": true, "ILS": true, "JMD": true, + "JPY": true, "JOD": true, "KZT": true, "KES": true, "KPW": true, + "KRW": true, "KWD": true, "KGS": true, "LAK": true, "LBP": true, + "LSL": true, "ZAR": true, "LRD": true, "LYD": true, "CHF": true, + "MOP": true, "MKD": true, "MGA": true, "MWK": true, "MYR": true, + "MVR": true, "MRU": true, "MUR": true, "XUA": true, "MXN": true, + "MXV": true, "MDL": true, "MNT": true, "MAD": true, "MZN": true, + "MMK": true, "NAD": true, "NPR": true, "NIO": true, "NGN": true, + "OMR": true, "PKR": true, "PAB": true, "PGK": true, "PYG": true, + "PEN": true, "PHP": true, "PLN": true, "QAR": true, "RON": true, + "RUB": true, "RWF": true, "SHP": true, "WST": true, "STN": true, + "SAR": true, "RSD": true, "SCR": true, "SLL": true, "SGD": true, + "XSU": true, "SBD": true, "SOS": true, "SSP": true, "LKR": true, + "SDG": true, "SRD": true, "SEK": true, "CHE": true, "CHW": true, + "SYP": true, "TWD": true, "TJS": true, "TZS": true, "THB": true, + "TOP": true, "TTD": true, "TND": true, "TRY": true, "TMT": true, + "UGX": true, "UAH": true, "AED": true, "USN": true, "UYU": true, + "UYI": true, "UYW": true, "UZS": true, "VUV": true, "VES": true, + "VND": true, "YER": true, "ZMW": true, "ZWL": true, "XBA": true, + "XBB": true, "XBC": true, "XBD": true, "XTS": true, "XXX": true, + "XAU": true, "XPD": true, "XPT": true, "XAG": true, +} + +var iso4217_numeric = map[int]bool{ + 8: true, 12: true, 32: true, 36: true, 44: true, + 48: true, 50: true, 51: true, 52: true, 60: true, + 64: true, 68: true, 72: true, 84: true, 90: true, + 96: true, 104: true, 108: true, 116: true, 124: true, + 132: true, 136: true, 144: true, 152: true, 156: true, + 170: true, 174: true, 188: true, 191: true, 192: true, + 203: true, 208: true, 214: true, 222: true, 230: true, + 232: true, 238: true, 242: true, 262: true, 270: true, + 292: true, 320: true, 324: true, 328: true, 332: true, + 340: true, 344: true, 348: true, 352: true, 356: true, + 360: true, 364: true, 368: true, 376: true, 388: true, + 392: true, 398: true, 400: true, 404: true, 408: true, + 410: true, 414: true, 417: true, 418: true, 422: true, + 426: true, 430: true, 434: true, 446: true, 454: true, + 458: true, 462: true, 480: true, 484: true, 496: true, + 498: 
true, 504: true, 512: true, 516: true, 524: true, + 532: true, 533: true, 548: true, 554: true, 558: true, + 566: true, 578: true, 586: true, 590: true, 598: true, + 600: true, 604: true, 608: true, 634: true, 643: true, + 646: true, 654: true, 682: true, 690: true, 694: true, + 702: true, 704: true, 706: true, 710: true, 728: true, + 748: true, 752: true, 756: true, 760: true, 764: true, + 776: true, 780: true, 784: true, 788: true, 800: true, + 807: true, 818: true, 826: true, 834: true, 840: true, + 858: true, 860: true, 882: true, 886: true, 901: true, + 927: true, 928: true, 929: true, 930: true, 931: true, + 932: true, 933: true, 934: true, 936: true, 938: true, + 940: true, 941: true, 943: true, 944: true, 946: true, + 947: true, 948: true, 949: true, 950: true, 951: true, + 952: true, 953: true, 955: true, 956: true, 957: true, + 958: true, 959: true, 960: true, 961: true, 962: true, + 963: true, 964: true, 965: true, 967: true, 968: true, + 969: true, 970: true, 971: true, 972: true, 973: true, + 975: true, 976: true, 977: true, 978: true, 979: true, + 980: true, 981: true, 984: true, 985: true, 986: true, + 990: true, 994: true, 997: true, 999: true, +} diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index eafad0db..8c258479 100644 --- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -7,6 +7,14 @@ and has the ability to dive into arrays and maps of any type. see more examples https://github.com/go-playground/validator/tree/master/_examples +Singleton + +Validator is designed to be thread-safe and used as a singleton instance. +It caches information about your struct and validations, +in essence only parsing your validation tags once per struct type. +Using multiple instances neglects the benefit of caching. +The not thread-safe functions are explicitly marked as such in the documentation. + Validation Functions Return Type error Doing things this way is actually the way the standard library does, see the @@ -726,6 +734,12 @@ This validates that a string value contains unicode alphanumeric characters only Usage: alphanumunicode +Boolean + +This validates that a string value can successfully be parsed into a boolean with strconv.ParseBool + + Usage: boolean + Number This validates that a string value contains number values only. 
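
The doc.go additions above describe the validator's singleton design and a new `boolean` string tag (a `jwt` tag is documented in the hunk that follows, backed by the `jWTRegexString` added to regexes.go further down). A minimal sketch of how these might be exercised with go-playground/validator v10 — the struct, field names, and sample token are illustrative only, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// One shared instance, per the "Singleton" note above: the validator caches
// parsed struct tags, so reusing a single *Validate is the intended pattern.
var validate = validator.New()

type request struct {
	// "boolean": the string must parse with strconv.ParseBool ("true", "0", ...).
	Enabled string `validate:"boolean"`
	// "jwt": the string must look like a JWT (three dot-separated base64url segments).
	Token string `validate:"jwt"`
}

func main() {
	ok := request{Enabled: "true", Token: "eyJhbGciOiJIUzI1NiJ9.e30.c2ln"}
	fmt.Println(validate.Struct(ok)) // <nil> when both fields pass

	bad := request{Enabled: "maybe", Token: "not-a-jwt"}
	fmt.Println(validate.Struct(bad)) // validation errors for Enabled and Token
}
```
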
@@ -811,6 +825,12 @@ This validates that a string value is valid JSON Usage: json +JWT String + +This validates that a string value is a valid JWT + + Usage: jwt + File path This validates that a string value contains a valid file path and that diff --git a/vendor/github.com/go-playground/validator/v10/go.mod b/vendor/github.com/go-playground/validator/v10/go.mod deleted file mode 100644 index 53c4820c..00000000 --- a/vendor/github.com/go-playground/validator/v10/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/go-playground/validator/v10 - -go 1.13 - -require ( - github.com/go-playground/assert/v2 v2.0.1 - github.com/go-playground/locales v0.13.0 - github.com/go-playground/universal-translator v0.17.0 - github.com/leodido/go-urn v1.2.0 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/text v0.3.2 // indirect -) diff --git a/vendor/github.com/go-playground/validator/v10/go.sum b/vendor/github.com/go-playground/validator/v10/go.sum deleted file mode 100644 index 4b00cf66..00000000 --- a/vendor/github.com/go-playground/validator/v10/go.sum +++ /dev/null @@ -1,33 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 
h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go index ddcf785a..df00c4eb 100644 --- a/vendor/github.com/go-playground/validator/v10/regexes.go +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -48,6 +48,7 @@ const ( uRLEncodedRegexString = `^(?:[^%]|%[0-9A-Fa-f]{2})*$` hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(>)|(<)|(")|(&)+[;]?` hTMLRegexString = `<[/]?([a-zA-Z]+).*?>` + jWTRegexString = "^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$" splitParamsRegexString = `'[^']*'|\S+` bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$` ) @@ -98,6 +99,7 @@ var ( uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString) hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString) hTMLRegex = regexp.MustCompile(hTMLRegexString) + jWTRegex = regexp.MustCompile(jWTRegexString) splitParamsRegex = regexp.MustCompile(splitParamsRegexString) bicRegex = regexp.MustCompile(bicRegexString) ) diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go index 9569c0dd..2a4fad02 100644 --- a/vendor/github.com/go-playground/validator/v10/validator.go +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -227,7 +227,7 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr } } - if !ct.hasTag { + if ct == nil || !ct.hasTag { return } diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go index 8e27707e..973964fc 100644 --- a/vendor/github.com/go-playground/validator/v10/validator_instance.go +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -89,6 +89,10 @@ type Validate struct { } // New returns a new instance of 'validate' with sane defaults. +// Validate is designed to be thread-safe and used as a singleton instance. +// It caches information about your struct and validations, +// in essence only parsing your validation tags once per struct type. +// Using multiple instances neglects the benefit of caching. 
func New() *Validate { tc := new(tagCache) @@ -207,11 +211,11 @@ func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationE func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error { if len(tag) == 0 { - return errors.New("Function Key cannot be empty") + return errors.New("function Key cannot be empty") } if fn == nil { - return errors.New("Function cannot be empty") + return errors.New("function cannot be empty") } _, ok := restrictedTags[tag] diff --git a/vendor/github.com/go-redis/cache/v8/go.mod b/vendor/github.com/go-redis/cache/v8/go.mod deleted file mode 100644 index fb3e4e64..00000000 --- a/vendor/github.com/go-redis/cache/v8/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module github.com/go-redis/cache/v8 - -go 1.13 - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-redis/redis/v8 v8.4.4 - github.com/golang/protobuf v1.4.3 // indirect - github.com/klauspost/compress v1.12.2 - github.com/onsi/ginkgo v1.14.2 - github.com/onsi/gomega v1.10.4 - github.com/vmihailenco/bufpool v0.1.11 - github.com/vmihailenco/go-tinylfu v0.2.0 - github.com/vmihailenco/msgpack/v5 v5.1.0 - golang.org/x/exp v0.0.0-20201221025956-e89b829e73ea - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - google.golang.org/protobuf v1.25.0 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect -) diff --git a/vendor/github.com/go-redis/cache/v8/go.sum b/vendor/github.com/go-redis/cache/v8/go.sum deleted file mode 100644 index 3558f9c2..00000000 --- a/vendor/github.com/go-redis/cache/v8/go.sum +++ /dev/null @@ -1,175 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-redis/redis/v8 v8.4.4 h1:fGqgxCTR1sydaKI00oQf3OmkU/DIe/I/fYXvGklCIuc= -github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= -github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= -github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega 
v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= -github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/vmihailenco/bufpool v0.1.11 h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6czKAd94= -github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ= -github.com/vmihailenco/go-tinylfu v0.2.0 h1:gRe/WurdOHaNrayn1anyWOgLkeC8xf0234kyLvkQWxM= -github.com/vmihailenco/go-tinylfu v0.2.0/go.mod h1:BLjA2pesPf7BH0jjFgrgB9uEgekHC4p9i8378iKOvdk= -github.com/vmihailenco/msgpack/v5 v5.1.0 h1:+od5YbEXxW95SPlW6beocmt8nOtlh83zqat5Ip9Hwdc= -github.com/vmihailenco/msgpack/v5 v5.1.0/go.mod h1:C5gboKD0TJPqWDTVTtrQNfRbiBwHZGo8UTqP/9/XvLI= -github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= -github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw= -go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20201221025956-e89b829e73ea h1:GnGfrp0fiNhiBS/v/aCFTmfEWgkvxW4Qiu8oM2/IfZ4= -golang.org/x/exp v0.0.0-20201221025956-e89b829e73ea/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 
h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/go-redis/redis/v8/go.mod b/vendor/github.com/go-redis/redis/v8/go.mod deleted file mode 100644 index aa13d2ef..00000000 --- a/vendor/github.com/go-redis/redis/v8/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/go-redis/redis/v8 - -go 1.13 - -require ( - github.com/cespare/xxhash/v2 v2.1.1 - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f - github.com/onsi/ginkgo v1.15.0 - github.com/onsi/gomega v1.10.5 - go.opentelemetry.io/otel/metric v0.20.0 -) diff --git a/vendor/github.com/go-redis/redis/v8/go.sum b/vendor/github.com/go-redis/redis/v8/go.sum deleted file mode 100644 index b8309364..00000000 --- a/vendor/github.com/go-redis/redis/v8/go.sum +++ /dev/null @@ -1,99 +0,0 @@ -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-sql-driver/mysql/go.mod b/vendor/github.com/go-sql-driver/mysql/go.mod deleted file mode 100644 index fffbf6a9..00000000 --- a/vendor/github.com/go-sql-driver/mysql/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/go-sql-driver/mysql - -go 1.10 diff --git a/vendor/github.com/gofrs/uuid/.travis.yml b/vendor/github.com/gofrs/uuid/.travis.yml deleted file mode 100644 index 0783aaa9..00000000 --- a/vendor/github.com/gofrs/uuid/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -language: go -sudo: false -go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -before_install: - - go get golang.org/x/tools/cmd/cover -script: - - go test ./... -race -coverprofile=coverage.txt -covermode=atomic -after_success: - - bash <(curl -s https://codecov.io/bash) -notifications: - email: false diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md index 2685a832..48303001 100644 --- a/vendor/github.com/gofrs/uuid/README.md +++ b/vendor/github.com/gofrs/uuid/README.md @@ -106,3 +106,4 @@ func main() { * [RFC-4122](https://tools.ietf.org/html/rfc4122) * [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) +* [New UUID Formats RFC Draft (Peabody) Rev 02](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-02) diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go index 2783d9e7..38bf6850 100644 --- a/vendor/github.com/gofrs/uuid/generator.go +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -26,6 +26,7 @@ import ( "crypto/rand" "crypto/sha1" "encoding/binary" + "errors" "fmt" "hash" "io" @@ -66,12 +67,45 @@ func NewV5(ns UUID, name string) UUID { return DefaultGenerator.NewV5(ns, name) } +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. 
Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV6() (UUID, error) { + return DefaultGenerator.NewV6() +} + +// NewV7 returns a k-sortable UUID based on the current UNIX epoch, with the +// ability to configure the timestamp's precision from millisecond all the way +// to nanosecond. The additional precision is supported by reducing the amount +// of pseudorandom data that makes up the rest of the UUID. +// +// If an unknown Precision argument is passed to this method it will panic. As +// such it's strongly encouraged to use the package-provided constants for this +// value. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV7(p Precision) (UUID, error) { + return DefaultGenerator.NewV7(p) +} + // Generator provides an interface for generating UUIDs. type Generator interface { NewV1() (UUID, error) NewV3(ns UUID, name string) UUID NewV4() (UUID, error) NewV5(ns UUID, name string) UUID + NewV6() (UUID, error) + NewV7(Precision) (UUID, error) } // Gen is a reference UUID generator based on the specifications laid out in @@ -97,6 +131,10 @@ type Gen struct { lastTime uint64 clockSequence uint16 hardwareAddr [6]byte + + v7LastTime uint64 + v7LastSubsec uint64 + v7ClockSequence uint16 } // interface check -- build will fail if *Gen doesn't satisfy Generator @@ -182,7 +220,39 @@ func (g *Gen) NewV5(ns UUID, name string) UUID { return u } -// getClockSequence returns the epoch and clock sequence. +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func (g *Gen) NewV6() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[10:]); err != nil { + return Nil, err + } + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid + binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits) + binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits) + + u.SetVersion(V6) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// getClockSequence returns the epoch and clock sequence for V1 and V6 UUIDs. 
func (g *Gen) getClockSequence() (uint64, uint16, error) { var err error g.clockSequenceOnce.Do(func() { @@ -210,6 +280,244 @@ func (g *Gen) getClockSequence() (uint64, uint16, error) { return timeNow, g.clockSequence, nil } +// Precision is used to configure the V7 generator, to specify how precise the +// timestamp within the UUID should be. +type Precision byte + +const ( + NanosecondPrecision Precision = iota + MicrosecondPrecision + MillisecondPrecision +) + +func (p Precision) String() string { + switch p { + case NanosecondPrecision: + return "nanosecond" + + case MicrosecondPrecision: + return "microsecond" + + case MillisecondPrecision: + return "millisecond" + + default: + return "unknown" + } +} + +// Duration returns the time.Duration for a specific precision. If the Precision +// value is not known, this returns 0. +func (p Precision) Duration() time.Duration { + switch p { + case NanosecondPrecision: + return time.Nanosecond + + case MicrosecondPrecision: + return time.Microsecond + + case MillisecondPrecision: + return time.Millisecond + + default: + return 0 + } +} + +// NewV7 returns a k-sortable UUID based on the current UNIX epoch, with the +// ability to configure the timestamp's precision from millisecond all the way +// to nanosecond. The additional precision is supported by reducing the amount +// of pseudorandom data that makes up the rest of the UUID. +// +// If an unknown Precision argument is passed to this method it will panic. As +// such it's strongly encouraged to use the package-provided constants for this +// value. +// +// This is implemented based on revision 02 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. 
+func (g *Gen) NewV7(p Precision) (UUID, error) { + var u UUID + var err error + + switch p { + case NanosecondPrecision: + u, err = g.newV7Nano() + + case MicrosecondPrecision: + u, err = g.newV7Micro() + + case MillisecondPrecision: + u, err = g.newV7Milli() + + default: + panic(fmt.Sprintf("unknown precision value %d", p)) + } + + if err != nil { + return Nil, err + } + + u.SetVersion(V7) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +func (g *Gen) newV7Milli() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[8:]); err != nil { + return Nil, err + } + + sec, nano, seq, err := g.getV7ClockSequence(MillisecondPrecision) + if err != nil { + return Nil, err + } + + msec := (nano / 1000000) & 0xfff + + d := (sec << 28) // set unixts field + d |= (msec << 16) // set msec field + d |= (uint64(seq) & 0xfff) // set seq field + + binary.BigEndian.PutUint64(u[:], d) + + return u, nil +} + +func (g *Gen) newV7Micro() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[10:]); err != nil { + return Nil, err + } + + sec, nano, seq, err := g.getV7ClockSequence(MicrosecondPrecision) + if err != nil { + return Nil, err + } + + usec := nano / 1000 + usech := (usec << 4) & 0xfff0000 + usecl := usec & 0xfff + + d := (sec << 28) // set unixts field + d |= usech | usecl // set usec fields + + binary.BigEndian.PutUint64(u[:], d) + binary.BigEndian.PutUint16(u[8:], seq) + + return u, nil +} + +func (g *Gen) newV7Nano() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[11:]); err != nil { + return Nil, err + } + + sec, nano, seq, err := g.getV7ClockSequence(NanosecondPrecision) + if err != nil { + return Nil, err + } + + nano &= 0x3fffffffff + nanoh := nano >> 26 + nanom := (nano >> 14) & 0xfff + nanol := uint16(nano & 0x3fff) + + d := (sec << 28) // set unixts field + d |= (nanoh << 16) | nanom // set nsec high and med fields + + binary.BigEndian.PutUint64(u[:], d) + binary.BigEndian.PutUint16(u[8:], nanol) // set nsec low field + + u[10] = byte(seq) // set seq field + + return u, nil +} + +const ( + maxSeq14 = (1 << 14) - 1 + maxSeq12 = (1 << 12) - 1 + maxSeq8 = (1 << 8) - 1 +) + +// getV7ClockSequence returns the unix epoch, nanoseconds of current second, and +// the sequence for V7 UUIDs. +func (g *Gen) getV7ClockSequence(p Precision) (epoch uint64, nano uint64, seq uint16, err error) { + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + tn := g.epochFunc() + unix := uint64(tn.Unix()) + nsec := uint64(tn.Nanosecond()) + + // V7 UUIDs have more precise requirements around how the clock sequence + // value is generated and used. Specifically they require that the sequence + // be zero, unless we've already generated a UUID within this unit of time + // (millisecond, microsecond, or nanosecond) at which point you should + // increment the sequence. Likewise if time has warped backwards for some reason (NTP + // adjustment?), we also increment the clock sequence to reduce the risk of a + // collision. 
+ switch { + case unix < g.v7LastTime: + g.v7ClockSequence++ + + case unix > g.v7LastTime: + g.v7ClockSequence = 0 + + case unix == g.v7LastTime: + switch p { + case NanosecondPrecision: + if nsec <= g.v7LastSubsec { + if g.v7ClockSequence >= maxSeq8 { + return 0, 0, 0, errors.New("generating nanosecond precision UUIDv7s too fast: internal clock sequence would roll over") + } + + g.v7ClockSequence++ + } else { + g.v7ClockSequence = 0 + } + + case MicrosecondPrecision: + if nsec/1000 <= g.v7LastSubsec/1000 { + if g.v7ClockSequence >= maxSeq14 { + return 0, 0, 0, errors.New("generating microsecond precision UUIDv7s too fast: internal clock sequence would roll over") + } + + g.v7ClockSequence++ + } else { + g.v7ClockSequence = 0 + } + + case MillisecondPrecision: + if nsec/1000000 <= g.v7LastSubsec/1000000 { + if g.v7ClockSequence >= maxSeq12 { + return 0, 0, 0, errors.New("generating millisecond precision UUIDv7s too fast: internal clock sequence would roll over") + } + + g.v7ClockSequence++ + } else { + g.v7ClockSequence = 0 + } + + default: + panic(fmt.Sprintf("unknown precision value %d", p)) + } + } + + g.v7LastTime = unix + g.v7LastSubsec = nsec + + return unix, nsec, g.v7ClockSequence, nil +} + // Returns the hardware address. func (g *Gen) getHardwareAddr() ([]byte, error) { var err error diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go index 78aed6e2..f314b845 100644 --- a/vendor/github.com/gofrs/uuid/uuid.go +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -20,11 +20,13 @@ // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // Package uuid provides implementations of the Universally Unique Identifier -// (UUID), as specified in RFC-4122, +// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 02). // -// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. +// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The +// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable +// UUIDs, versions 6 and 7. // -// DCE 1.1[2] provides the specification for version 2, but version 2 support +// DCE 1.1[3] provides the specification for version 2, but version 2 support // was removed from this package in v4 due to some concerns with the // specification itself. Reading the spec, it seems that it would result in // generating UUIDs that aren't very unique. In having read the spec it seemed @@ -34,7 +36,8 @@ // ensure we were understanding the specification correctly. // // [1] https://tools.ietf.org/html/rfc4122 -// [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 +// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-02 +// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 package uuid import ( @@ -60,6 +63,9 @@ const ( V3 // Version 3 (namespace name-based) V4 // Version 4 (random) V5 // Version 5 (namespace name-based) + V6 // Version 6 (k-sortable timestamp and random data) [peabody draft] + V7 // Version 7 (k-sortable timestamp, with configurable precision, and random data) [peabody draft] + _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented] ) // UUID layout variants. 
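
The generator.go and uuid.go hunks above add draft-Peabody V6 and V7 support to gofrs/uuid, including the exported `Precision` constants. A small sketch of how the new package-level helpers might be called, following only the signatures visible in this diff (output values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/gofrs/uuid"
)

func main() {
	// V6: V1-style timestamp with the bits reordered to be k-sortable.
	u6, err := uuid.NewV6()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u6, u6.Version()) // Version() should report 6

	// V7: Unix-epoch timestamp; precision is chosen via the new Precision constants.
	u7, err := uuid.NewV7(uuid.MillisecondPrecision)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u7, u7.Version(), uuid.MillisecondPrecision) // Precision.String() -> "millisecond"
}
```
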
@@ -88,6 +94,7 @@ const _100nsPerSecond = 10000000 func (t Timestamp) Time() (time.Time, error) { secs := uint64(t) / _100nsPerSecond nsecs := 100 * (uint64(t) % _100nsPerSecond) + return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil } @@ -98,12 +105,34 @@ func TimestampFromV1(u UUID) (Timestamp, error) { err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) return 0, err } + low := binary.BigEndian.Uint32(u[0:4]) mid := binary.BigEndian.Uint16(u[4:6]) hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff + return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil } +// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This +// function returns an error if the UUID is any version other than 6. +// +// This is implemented based on revision 01 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func TimestampFromV6(u UUID) (Timestamp, error) { + if u.Version() != 6 { + return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version()) + } + + hi := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + low := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil +} + // String parse helpers. var ( urnPrefix = []byte("urn:uuid:") @@ -122,6 +151,11 @@ var ( NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) ) +// IsNil returns if the UUID is equal to the nil UUID +func (u UUID) IsNil() bool { + return u == Nil +} + // Version returns the algorithm version used to generate the UUID. func (u UUID) Version() byte { return u[6] >> 4 diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md index 0b621493..32966f59 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md +++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md @@ -1,6 +1,6 @@ ## Migration Guide (v4.0.0) -Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0]), the import path will be: +Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be: "github.com/golang-jwt/jwt/v4" @@ -19,4 +19,4 @@ go mod tidy ## Older releases (before v3.2.0) -The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. \ No newline at end of file +The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. 
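// A short sketch of the new V6 timestamp helpers shown above. TimestampFromV6
// and Timestamp.Time come from the hunk itself; the package-level NewV6
// generator is assumed to exist in this vendored gofrs/uuid revision and is
// not visible in this hunk.
package main

import (
	"fmt"

	"github.com/gofrs/uuid"
)

func main() {
	u, err := uuid.NewV6() // assumed helper producing a time-ordered UUID
	if err != nil {
		panic(err)
	}

	// TimestampFromV6 reassembles the 60-bit timestamp (100ns intervals since
	// 1582-10-15) and returns an error for any UUID version other than 6.
	ts, err := uuid.TimestampFromV6(u)
	if err != nil {
		panic(err)
	}

	t, err := ts.Time() // convert the embedded timestamp to a time.Time
	if err != nil {
		panic(err)
	}

	fmt.Println(u, t.UTC())
}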
diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md index 96fe3b97..3072d24a 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -1,12 +1,12 @@ # jwt-go [![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) -[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). -Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compataibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. -See the `MIGRATION_GUIDE.md` for more information. +Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. +See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. > After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. @@ -52,7 +52,7 @@ Here's an example of an extension that integrates with multiple Google Cloud Pla ## Compliance -This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: * In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. @@ -91,6 +91,7 @@ Each signing method expects a different object type for its signing keys. 
See th * The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation * The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation * The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation ### JWT and OAuth diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go index 7c2f33bc..41cc8265 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -12,9 +12,122 @@ type Claims interface { Valid() error } -// StandardClaims are a structured version of the Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types +// RegisteredClaims are a structured version of the JWT Claims Set, +// restricted to Registered Claim Names, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 +// +// This type can be used on its own, but then additional private and +// public claims embedded in the JWT will not be parsed. The typical usecase +// therefore is to embedded this in a user-defined claim type. +// +// See examples for how to use this with your own claim types. +type RegisteredClaims struct { + // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 + Issuer string `json:"iss,omitempty"` + + // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 + Subject string `json:"sub,omitempty"` + + // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 + Audience ClaimStrings `json:"aud,omitempty"` + + // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 + ExpiresAt *NumericDate `json:"exp,omitempty"` + + // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 + NotBefore *NumericDate `json:"nbf,omitempty"` + + // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c RegisteredClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. 
+ if !c.VerifyExpiresAt(now, false) { + delta := now.Sub(c.ExpiresAt.Time) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = fmt.Errorf("token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. +func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { + if c.ExpiresAt == nil { + return verifyExp(nil, cmp, req) + } + + return verifyExp(&c.ExpiresAt.Time, cmp, req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { + if c.IssuedAt == nil { + return verifyIat(nil, cmp, req) + } + + return verifyIat(&c.IssuedAt.Time, cmp, req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { + if c.NotBefore == nil { + return verifyNbf(nil, cmp, req) + } + + return verifyNbf(&c.NotBefore.Time, cmp, req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// StandardClaims are a structured version of the JWT Claims Set, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the +// specification exactly, since they were based on an earlier draft of the +// specification and not updated. The main difference is that they only +// support integer-based date fields and singular audiences. This might lead to +// incompatibilities with other JWT implementations. The use of this is discouraged, instead +// the newer RegisteredClaims struct should be used. +// +// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. type StandardClaims struct { Audience string `json:"aud,omitempty"` ExpiresAt int64 `json:"exp,omitempty"` @@ -41,7 +154,7 @@ func (c StandardClaims) Valid() error { } if !c.VerifyIssuedAt(now, false) { - vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Inner = fmt.Errorf("token used before issued") vErr.Errors |= ValidationErrorIssuedAt } @@ -63,16 +176,37 @@ func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { return verifyAud([]string{c.Audience}, cmp, req) } -// VerifyExpiresAt compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. 
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) + if c.ExpiresAt == 0 { + return verifyExp(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.ExpiresAt, 0) + return verifyExp(&t, time.Unix(cmp, 0), req) } -// VerifyIssuedAt compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) + if c.IssuedAt == 0 { + return verifyIat(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.IssuedAt, 0) + return verifyIat(&t, time.Unix(cmp, 0), req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + if c.NotBefore == 0 { + return verifyNbf(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.NotBefore, 0) + return verifyNbf(&t, time.Unix(cmp, 0), req) } // VerifyIssuer compares the iss claim against cmp. @@ -81,12 +215,6 @@ func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { return verifyIss(c.Issuer, cmp, req) } -// VerifyNotBefore compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - // ----- helpers func verifyAud(aud []string, cmp string, required bool) bool { @@ -112,18 +240,25 @@ func verifyAud(aud []string, cmp string, required bool) bool { return result } -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { +func verifyExp(exp *time.Time, now time.Time, required bool) bool { + if exp == nil { + return !required + } + return now.Before(*exp) +} + +func verifyIat(iat *time.Time, now time.Time, required bool) bool { + if iat == nil { return !required } - return now <= exp + return now.After(*iat) || now.Equal(*iat) } -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { +func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { + if nbf == nil { return !required } - return now >= iat + return now.After(*nbf) || now.Equal(*nbf) } func verifyIss(iss string, cmp string, required bool) bool { @@ -136,10 +271,3 @@ func verifyIss(iss string, cmp string, required bool) bool { return false } } - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go index 9f40dc0c..07d3aacd 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go @@ -3,7 +3,9 @@ package jwt import ( "errors" + "crypto" "crypto/ed25519" + "crypto/rand" ) var ( @@ -62,20 +64,22 @@ func (m *SigningMethodEd25519) Verify(signingString, signature string, key inter // Sign implements token signing for the SigningMethod. 
// For this signing method, key must be an ed25519.PrivateKey func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { - var ed25519Key ed25519.PrivateKey + var ed25519Key crypto.Signer var ok bool - if ed25519Key, ok = key.(ed25519.PrivateKey); !ok { + if ed25519Key, ok = key.(crypto.Signer); !ok { return "", ErrInvalidKeyType } - // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize - // this allows to avoid recover usage - if len(ed25519Key) != ed25519.PrivateKeySize { + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { return "", ErrInvalidKey } // Sign the string and return the encoded result - sig := ed25519.Sign(ed25519Key, []byte(signingString)) + // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return "", err + } return EncodeSegment(sig), nil } diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go index f309878b..b9d18e49 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/errors.go +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -53,6 +53,11 @@ func (e ValidationError) Error() string { } } +// Unwrap gives errors.Is and errors.As access to the inner error. +func (e *ValidationError) Unwrap() error { + return e.Inner +} + // No errors func (e *ValidationError) valid() bool { return e.Errors == 0 diff --git a/vendor/github.com/golang-jwt/jwt/v4/go.mod b/vendor/github.com/golang-jwt/jwt/v4/go.mod deleted file mode 100644 index 6bc53fdc..00000000 --- a/vendor/github.com/golang-jwt/jwt/v4/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/golang-jwt/jwt/v4 - -go 1.15 diff --git a/vendor/github.com/golang-jwt/jwt/v4/go.sum b/vendor/github.com/golang-jwt/jwt/v4/go.sum deleted file mode 100644 index e69de29b..00000000 diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go index 7e00e753..e7da633b 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -3,6 +3,7 @@ package jwt import ( "encoding/json" "errors" + "time" // "fmt" ) @@ -31,65 +32,92 @@ func (m MapClaims) VerifyAudience(cmp string, req bool) bool { return verifyAud(aud, cmp, req) } -// VerifyExpiresAt compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// If req is false, it will return true, if exp is unset. func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - exp, ok := m["exp"] + cmpTime := time.Unix(cmp, 0) + + v, ok := m["exp"] if !ok { return !req } - switch expType := exp.(type) { + + switch exp := v.(type) { case float64: - return verifyExp(int64(expType), cmp, req) + if exp == 0 { + return verifyExp(nil, cmpTime, req) + } + + return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req) case json.Number: - v, _ := expType.Int64() - return verifyExp(v, cmp, req) + v, _ := exp.Float64() + + return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req) } + return false } -// VerifyIssuedAt compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyIssuedAt compares the exp claim against cmp (cmp >= iat). 
+// If req is false, it will return true, if iat is unset. func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - iat, ok := m["iat"] + cmpTime := time.Unix(cmp, 0) + + v, ok := m["iat"] if !ok { return !req } - switch iatType := iat.(type) { + + switch iat := v.(type) { case float64: - return verifyIat(int64(iatType), cmp, req) + if iat == 0 { + return verifyIat(nil, cmpTime, req) + } + + return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req) case json.Number: - v, _ := iatType.Int64() - return verifyIat(v, cmp, req) + v, _ := iat.Float64() + + return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req) } - return false -} -// VerifyIssuer compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) + return false } -// VerifyNotBefore compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - nbf, ok := m["nbf"] + cmpTime := time.Unix(cmp, 0) + + v, ok := m["nbf"] if !ok { return !req } - switch nbfType := nbf.(type) { + + switch nbf := v.(type) { case float64: - return verifyNbf(int64(nbfType), cmp, req) + if nbf == 0 { + return verifyNbf(nil, cmpTime, req) + } + + return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req) case json.Number: - v, _ := nbfType.Int64() - return verifyNbf(v, cmp, req) + v, _ := nbf.Float64() + + return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req) } + return false } -// Valid calidates time based claims "exp, iat, nbf". +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Valid validates time based claims "exp, iat, nbf". // There is no accounting for clock skew. // As well, if any of the above claims are not in the token, it will still // be considered a valid claim. diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index 0c811f31..2f61a69d 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -8,14 +8,36 @@ import ( ) type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing + // If populated, only these methods will be considered valid. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + ValidMethods []string + + // Use JSON Number format in JSON decoder. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + UseJSONNumber bool + + // Skip claims validation during token parsing. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. 
+ SkipClaimsValidation bool } -// Parse parses, validates, and returns a token. +// NewParser creates a new Parser with the specified options +func NewParser(options ...ParserOption) *Parser { + p := &Parser{} + + // loop through our parsing options and apply them + for _, option := range options { + option(p) + } + + return p +} + +// Parse parses, validates, verifies the signature and returns the parsed token. // keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go new file mode 100644 index 00000000..0fede4f1 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go @@ -0,0 +1,29 @@ +package jwt + +// ParserOption is used to implement functional-style options that modify the behaviour of the parser. To add +// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that +// takes a *Parser type as input and manipulates its configuration accordingly. +type ParserOption func(*Parser) + +// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid. +// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. +func WithValidMethods(methods []string) ParserOption { + return func(p *Parser) { + p.ValidMethods = methods + } +} + +// WithJSONNumber is an option to configure the underyling JSON parser with UseNumber +func WithJSONNumber() ParserOption { + return func(p *Parser) { + p.UseJSONNumber = true + } +} + +// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you exactly know +// what you are doing. +func WithoutClaimsValidation() ParserOption { + return func(p *Parser) { + p.SkipClaimsValidation = true + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go index 3269170f..241ae9c6 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go +++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go @@ -33,3 +33,14 @@ func GetSigningMethod(alg string) (method SigningMethod) { } return } + +// GetAlgorithms returns a list of registered "alg" names +func GetAlgorithms() (algs []string) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + for alg := range signingMethods { + algs = append(algs, alg) + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go index b896acb0..12344138 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/token.go +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -7,6 +7,14 @@ import ( "time" ) + +// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515 +// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations +// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global +// variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe. 
+// To use the non-recommended decoding, set this boolean to `true` prior to using this package. +var DecodePaddingAllowed bool + // TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). // You can override it to use another time value. This is useful for testing or if your // server uses a different time zone than your tokens. @@ -29,11 +37,12 @@ type Token struct { Valid bool // Is the token valid? Populated when you Parse/Verify a token } -// New creates a new Token. Takes a signing method +// New creates a new Token with the specified signing method and an empty map of claims. func New(method SigningMethod) *Token { return NewWithClaims(method, MapClaims{}) } +// NewWithClaims creates a new Token with the specified signing method and claims. func NewWithClaims(method SigningMethod, claims Claims) *Token { return &Token{ Header: map[string]interface{}{ @@ -45,7 +54,8 @@ func NewWithClaims(method SigningMethod, claims Claims) *Token { } } -// SignedString retrieves the complete, signed token +// SignedString creates and returns a complete, signed JWT. +// The token is signed using the SigningMethod specified in the token. func (t *Token) SignedString(key interface{}) (string, error) { var sig, sstr string var err error @@ -82,15 +92,19 @@ func (t *Token) SigningString() (string, error) { return strings.Join(parts, "."), nil } -// Parse parses, validates, and returns a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the cryptographic key +// for verifying the signature. +// The caller is strongly encouraged to set the WithValidMethods option to +// validate the 'alg' claim in the token matches the expected algorithm. 
+// For more details about the importance of validating the 'alg' claim, +// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) } -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) } // EncodeSegment encodes a JWT specific base64url encoding with padding stripped @@ -106,5 +120,12 @@ func EncodeSegment(seg []byte) string { // Deprecated: In a future release, we will demote this function to a non-exported function, since it // should only be used internally func DecodeSegment(seg string) ([]byte, error) { + if DecodePaddingAllowed { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + return base64.URLEncoding.DecodeString(seg) + } + return base64.RawURLEncoding.DecodeString(seg) } diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go new file mode 100644 index 00000000..80b1b969 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -0,0 +1,127 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +// TimePrecision sets the precision of times and dates within this library. +// This has an influence on the precision of times when comparing expiry or +// other related time fields. Furthermore, it is also the precision of times +// when serializing. +// +// For backwards compatibility the default precision is set to seconds, so that +// no fractional timestamps are generated. +var TimePrecision = time.Second + +// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially +// its MarshalJSON function. +// +// If it is set to true (the default), it will always serialize the type as an +// array of strings, even if it just contains one element, defaulting to the behaviour +// of the underlying []string. If it is set to false, it will serialize to a single +// string, if it contains one element. Otherwise, it will serialize to an array of strings. +var MarshalSingleStringAsArray = true + +// NumericDate represents a JSON numeric date value, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-2. +type NumericDate struct { + time.Time +} + +// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. +// It will truncate the timestamp according to the precision specified in TimePrecision. +func NewNumericDate(t time.Time) *NumericDate { + return &NumericDate{t.Truncate(TimePrecision)} +} + +// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a +// UNIX epoch with the float fraction representing non-integer seconds. +func newNumericDateFromSeconds(f float64) *NumericDate { + round, frac := math.Modf(f) + return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) +} + +// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch +// represented in NumericDate to a byte array, using the precision specified in TimePrecision. 
+func (date NumericDate) MarshalJSON() (b []byte, err error) { + f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second) + + return []byte(strconv.FormatFloat(f, 'f', -1, 64)), nil +} + +// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a +// NumericDate from a JSON representation, i.e. a json.Number. This number represents an UNIX epoch +// with either integer or non-integer seconds. +func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { + var ( + number json.Number + f float64 + ) + + if err = json.Unmarshal(b, &number); err != nil { + return fmt.Errorf("could not parse NumericData: %w", err) + } + + if f, err = number.Float64(); err != nil { + return fmt.Errorf("could not convert json number value to float: %w", err) + } + + n := newNumericDateFromSeconds(f) + *date = *n + + return nil +} + +// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string. +// This type is necessary, since the "aud" claim can either be a single string or an array. +type ClaimStrings []string + +func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { + var value interface{} + + if err = json.Unmarshal(data, &value); err != nil { + return err + } + + var aud []string + + switch v := value.(type) { + case string: + aud = append(aud, v) + case []string: + aud = ClaimStrings(v) + case []interface{}: + for _, vv := range v { + vs, ok := vv.(string) + if !ok { + return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + } + aud = append(aud, vs) + } + case nil: + return nil + default: + return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } + + *s = aud + + return +} + +func (s ClaimStrings) MarshalJSON() (b []byte, err error) { + // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field, + // only contains one element, it MAY be serialized as a single string. This may or may not be + // desired based on the ecosystem of other JWT library used, so we make it configurable by the + // variable MarshalSingleStringAsArray. + if len(s) == 1 && !MarshalSingleStringAsArray { + return json.Marshal(s[0]) + } + + return json.Marshal([]string(s)) +} diff --git a/vendor/github.com/gomodule/redigo/LICENSE b/vendor/github.com/gomodule/redigo/LICENSE deleted file mode 100644 index 67db8588..00000000 --- a/vendor/github.com/gomodule/redigo/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/gomodule/redigo/redis/commandinfo.go b/vendor/github.com/gomodule/redigo/redis/commandinfo.go deleted file mode 100644 index b6df6a25..00000000 --- a/vendor/github.com/gomodule/redigo/redis/commandinfo.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2014 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "strings" -) - -const ( - connectionWatchState = 1 << iota - connectionMultiState - connectionSubscribeState - connectionMonitorState -) - -type commandInfo struct { - // Set or Clear these states on connection. 
- Set, Clear int -} - -var commandInfos = map[string]commandInfo{ - "WATCH": {Set: connectionWatchState}, - "UNWATCH": {Clear: connectionWatchState}, - "MULTI": {Set: connectionMultiState}, - "EXEC": {Clear: connectionWatchState | connectionMultiState}, - "DISCARD": {Clear: connectionWatchState | connectionMultiState}, - "PSUBSCRIBE": {Set: connectionSubscribeState}, - "SUBSCRIBE": {Set: connectionSubscribeState}, - "MONITOR": {Set: connectionMonitorState}, -} - -func init() { - for n, ci := range commandInfos { - commandInfos[strings.ToLower(n)] = ci - } -} - -func lookupCommandInfo(commandName string) commandInfo { - if ci, ok := commandInfos[commandName]; ok { - return ci - } - return commandInfos[strings.ToUpper(commandName)] -} diff --git a/vendor/github.com/gomodule/redigo/redis/conn.go b/vendor/github.com/gomodule/redigo/redis/conn.go deleted file mode 100644 index 33b43be6..00000000 --- a/vendor/github.com/gomodule/redigo/redis/conn.go +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bufio" - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "net/url" - "regexp" - "strconv" - "sync" - "time" -) - -var ( - _ ConnWithTimeout = (*conn)(nil) -) - -// conn is the low-level implementation of Conn -type conn struct { - // Shared - mu sync.Mutex - pending int - err error - conn net.Conn - - // Read - readTimeout time.Duration - br *bufio.Reader - - // Write - writeTimeout time.Duration - bw *bufio.Writer - - // Scratch space for formatting argument length. - // '*' or '$', length, "\r\n" - lenScratch [32]byte - - // Scratch space for formatting integers and floats. - numScratch [40]byte -} - -// DialTimeout acts like Dial but takes timeouts for establishing the -// connection to the server, writing a command and reading a reply. -// -// Deprecated: Use Dial with options instead. -func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { - return Dial(network, address, - DialConnectTimeout(connectTimeout), - DialReadTimeout(readTimeout), - DialWriteTimeout(writeTimeout)) -} - -// DialOption specifies an option for dialing a Redis server. -type DialOption struct { - f func(*dialOptions) -} - -type dialOptions struct { - readTimeout time.Duration - writeTimeout time.Duration - dialer *net.Dialer - dialContext func(ctx context.Context, network, addr string) (net.Conn, error) - db int - username string - password string - clientName string - useTLS bool - skipVerify bool - tlsConfig *tls.Config -} - -// DialReadTimeout specifies the timeout for reading a single command reply. -func DialReadTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.readTimeout = d - }} -} - -// DialWriteTimeout specifies the timeout for writing a single command. 
-func DialWriteTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.writeTimeout = d - }} -} - -// DialConnectTimeout specifies the timeout for connecting to the Redis server when -// no DialNetDial option is specified. -func DialConnectTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.dialer.Timeout = d - }} -} - -// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server -// when no DialNetDial option is specified. -// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then -// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected. -func DialKeepAlive(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.dialer.KeepAlive = d - }} -} - -// DialNetDial specifies a custom dial function for creating TCP -// connections, otherwise a net.Dialer customized via the other options is used. -// DialNetDial overrides DialConnectTimeout and DialKeepAlive. -func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { - return DialOption{func(do *dialOptions) { - do.dialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { - return dial(network, addr) - } - }} -} - -// DialContextFunc specifies a custom dial function with context for creating TCP -// connections, otherwise a net.Dialer customized via the other options is used. -// DialContextFunc overrides DialConnectTimeout and DialKeepAlive. -func DialContextFunc(f func(ctx context.Context, network, addr string) (net.Conn, error)) DialOption { - return DialOption{func(do *dialOptions) { - do.dialContext = f - }} -} - -// DialDatabase specifies the database to select when dialing a connection. -func DialDatabase(db int) DialOption { - return DialOption{func(do *dialOptions) { - do.db = db - }} -} - -// DialPassword specifies the password to use when connecting to -// the Redis server. -func DialPassword(password string) DialOption { - return DialOption{func(do *dialOptions) { - do.password = password - }} -} - -// DialUsername specifies the username to use when connecting to -// the Redis server when Redis ACLs are used. -func DialUsername(username string) DialOption { - return DialOption{func(do *dialOptions) { - do.username = username - }} -} - -// DialClientName specifies a client name to be used -// by the Redis server connection. -func DialClientName(name string) DialOption { - return DialOption{func(do *dialOptions) { - do.clientName = name - }} -} - -// DialTLSConfig specifies the config to use when a TLS connection is dialed. -// Has no effect when not dialing a TLS connection. -func DialTLSConfig(c *tls.Config) DialOption { - return DialOption{func(do *dialOptions) { - do.tlsConfig = c - }} -} - -// DialTLSSkipVerify disables server name verification when connecting over -// TLS. Has no effect when not dialing a TLS connection. -func DialTLSSkipVerify(skip bool) DialOption { - return DialOption{func(do *dialOptions) { - do.skipVerify = skip - }} -} - -// DialUseTLS specifies whether TLS should be used when connecting to the -// server. This option is ignore by DialURL. -func DialUseTLS(useTLS bool) DialOption { - return DialOption{func(do *dialOptions) { - do.useTLS = useTLS - }} -} - -// Dial connects to the Redis server at the given network and -// address using the specified options. 
-func Dial(network, address string, options ...DialOption) (Conn, error) { - return DialContext(context.Background(), network, address, options...) -} - -// DialContext connects to the Redis server at the given network and -// address using the specified options and context. -func DialContext(ctx context.Context, network, address string, options ...DialOption) (Conn, error) { - do := dialOptions{ - dialer: &net.Dialer{ - KeepAlive: time.Minute * 5, - }, - } - for _, option := range options { - option.f(&do) - } - if do.dialContext == nil { - do.dialContext = do.dialer.DialContext - } - - netConn, err := do.dialContext(ctx, network, address) - if err != nil { - return nil, err - } - - if do.useTLS { - var tlsConfig *tls.Config - if do.tlsConfig == nil { - tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify} - } else { - tlsConfig = cloneTLSConfig(do.tlsConfig) - } - if tlsConfig.ServerName == "" { - host, _, err := net.SplitHostPort(address) - if err != nil { - netConn.Close() - return nil, err - } - tlsConfig.ServerName = host - } - - tlsConn := tls.Client(netConn, tlsConfig) - if err := tlsConn.Handshake(); err != nil { - netConn.Close() - return nil, err - } - netConn = tlsConn - } - - c := &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: do.readTimeout, - writeTimeout: do.writeTimeout, - } - - if do.password != "" { - authArgs := make([]interface{}, 0, 2) - if do.username != "" { - authArgs = append(authArgs, do.username) - } - authArgs = append(authArgs, do.password) - if _, err := c.Do("AUTH", authArgs...); err != nil { - netConn.Close() - return nil, err - } - } - - if do.clientName != "" { - if _, err := c.Do("CLIENT", "SETNAME", do.clientName); err != nil { - netConn.Close() - return nil, err - } - } - - if do.db != 0 { - if _, err := c.Do("SELECT", do.db); err != nil { - netConn.Close() - return nil, err - } - } - - return c, nil -} - -var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) - -// DialURL connects to a Redis server at the given URL using the Redis -// URI scheme. URLs should follow the draft IANA specification for the -// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). -func DialURL(rawurl string, options ...DialOption) (Conn, error) { - u, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - - if u.Scheme != "redis" && u.Scheme != "rediss" { - return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) - } - - if u.Opaque != "" { - return nil, fmt.Errorf("invalid redis URL, url is opaque: %s", rawurl) - } - - // As per the IANA draft spec, the host defaults to localhost and - // the port defaults to 6379. - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // assume port is missing - host = u.Host - port = "6379" - } - if host == "" { - host = "localhost" - } - address := net.JoinHostPort(host, port) - - if u.User != nil { - password, isSet := u.User.Password() - if isSet { - options = append(options, DialUsername(u.User.Username()), DialPassword(password)) - } - } - - match := pathDBRegexp.FindStringSubmatch(u.Path) - if len(match) == 2 { - db := 0 - if len(match[1]) > 0 { - db, err = strconv.Atoi(match[1]) - if err != nil { - return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) - } - } - if db != 0 { - options = append(options, DialDatabase(db)) - } - } else if u.Path != "" { - return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) - } - - options = append(options, DialUseTLS(u.Scheme == "rediss")) - - return Dial("tcp", address, options...) 
-} - -// NewConn returns a new Redigo connection for the given net connection. -func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { - return &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: readTimeout, - writeTimeout: writeTimeout, - } -} - -func (c *conn) Close() error { - c.mu.Lock() - err := c.err - if c.err == nil { - c.err = errors.New("redigo: closed") - err = c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) fatal(err error) error { - c.mu.Lock() - if c.err == nil { - c.err = err - // Close connection to force errors on subsequent calls and to unblock - // other reader or writer. - c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) Err() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err -} - -func (c *conn) writeLen(prefix byte, n int) error { - c.lenScratch[len(c.lenScratch)-1] = '\n' - c.lenScratch[len(c.lenScratch)-2] = '\r' - i := len(c.lenScratch) - 3 - for { - c.lenScratch[i] = byte('0' + n%10) - i -= 1 - n = n / 10 - if n == 0 { - break - } - } - c.lenScratch[i] = prefix - _, err := c.bw.Write(c.lenScratch[i:]) - return err -} - -func (c *conn) writeString(s string) error { - c.writeLen('$', len(s)) - c.bw.WriteString(s) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeBytes(p []byte) error { - c.writeLen('$', len(p)) - c.bw.Write(p) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeInt64(n int64) error { - return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) -} - -func (c *conn) writeFloat64(n float64) error { - return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) -} - -func (c *conn) writeCommand(cmd string, args []interface{}) error { - c.writeLen('*', 1+len(args)) - if err := c.writeString(cmd); err != nil { - return err - } - for _, arg := range args { - if err := c.writeArg(arg, true); err != nil { - return err - } - } - return nil -} - -func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) { - switch arg := arg.(type) { - case string: - return c.writeString(arg) - case []byte: - return c.writeBytes(arg) - case int: - return c.writeInt64(int64(arg)) - case int64: - return c.writeInt64(arg) - case float64: - return c.writeFloat64(arg) - case bool: - if arg { - return c.writeString("1") - } else { - return c.writeString("0") - } - case nil: - return c.writeString("") - case Argument: - if argumentTypeOK { - return c.writeArg(arg.RedisArg(), false) - } - // See comment in default clause below. - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - return c.writeBytes(buf.Bytes()) - default: - // This default clause is intended to handle builtin numeric types. - // The function should return an error for other types, but this is not - // done for compatibility with previous versions of the package. - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - return c.writeBytes(buf.Bytes()) - } -} - -type protocolError string - -func (pe protocolError) Error() string { - return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) -} - -// readLine reads a line of input from the RESP stream. -func (c *conn) readLine() ([]byte, error) { - // To avoid allocations, attempt to read the line using ReadSlice. This - // call typically succeeds. The known case where the call fails is when - // reading the output from the MONITOR command. 
- p, err := c.br.ReadSlice('\n') - if err == bufio.ErrBufferFull { - // The line does not fit in the bufio.Reader's buffer. Fall back to - // allocating a buffer for the line. - buf := append([]byte{}, p...) - for err == bufio.ErrBufferFull { - p, err = c.br.ReadSlice('\n') - buf = append(buf, p...) - } - p = buf - } - if err != nil { - return nil, err - } - i := len(p) - 2 - if i < 0 || p[i] != '\r' { - return nil, protocolError("bad response line terminator") - } - return p[:i], nil -} - -// parseLen parses bulk string and array lengths. -func parseLen(p []byte) (int, error) { - if len(p) == 0 { - return -1, protocolError("malformed length") - } - - if p[0] == '-' && len(p) == 2 && p[1] == '1' { - // handle $-1 and $-1 null replies. - return -1, nil - } - - var n int - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return -1, protocolError("illegal bytes in length") - } - n += int(b - '0') - } - - return n, nil -} - -// parseInt parses an integer reply. -func parseInt(p []byte) (interface{}, error) { - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - - var negate bool - if p[0] == '-' { - negate = true - p = p[1:] - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - } - - var n int64 - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return 0, protocolError("illegal bytes in length") - } - n += int64(b - '0') - } - - if negate { - n = -n - } - return n, nil -} - -var ( - okReply interface{} = "OK" - pongReply interface{} = "PONG" -) - -func (c *conn) readReply() (interface{}, error) { - line, err := c.readLine() - if err != nil { - return nil, err - } - if len(line) == 0 { - return nil, protocolError("short response line") - } - switch line[0] { - case '+': - switch string(line[1:]) { - case "OK": - // Avoid allocation for frequent "+OK" response. 
- return okReply, nil - case "PONG": - // Avoid allocation in PING command benchmarks :) - return pongReply, nil - default: - return string(line[1:]), nil - } - case '-': - return Error(string(line[1:])), nil - case ':': - return parseInt(line[1:]) - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - _, err = io.ReadFull(c.br, p) - if err != nil { - return nil, err - } - if line, err := c.readLine(); err != nil { - return nil, err - } else if len(line) != 0 { - return nil, protocolError("bad bulk string format") - } - return p, nil - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([]interface{}, n) - for i := range r { - r[i], err = c.readReply() - if err != nil { - return nil, err - } - } - return r, nil - } - return nil, protocolError("unexpected response line") -} - -func (c *conn) Send(cmd string, args ...interface{}) error { - c.mu.Lock() - c.pending += 1 - c.mu.Unlock() - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.writeCommand(cmd, args); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Flush() error { - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.bw.Flush(); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Receive() (interface{}, error) { - return c.ReceiveWithTimeout(c.readTimeout) -} - -func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { - var deadline time.Time - if timeout != 0 { - deadline = time.Now().Add(timeout) - } - c.conn.SetReadDeadline(deadline) - - if reply, err = c.readReply(); err != nil { - return nil, c.fatal(err) - } - // When using pub/sub, the number of receives can be greater than the - // number of sends. To enable normal use of the connection after - // unsubscribing from all channels, we do not decrement pending to a - // negative value. - // - // The pending field is decremented after the reply is read to handle the - // case where Receive is called before Send. - c.mu.Lock() - if c.pending > 0 { - c.pending -= 1 - } - c.mu.Unlock() - if err, ok := reply.(Error); ok { - return nil, err - } - return -} - -func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { - return c.DoWithTimeout(c.readTimeout, cmd, args...) 
-} - -func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { - c.mu.Lock() - pending := c.pending - c.pending = 0 - c.mu.Unlock() - - if cmd == "" && pending == 0 { - return nil, nil - } - - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - - if cmd != "" { - if err := c.writeCommand(cmd, args); err != nil { - return nil, c.fatal(err) - } - } - - if err := c.bw.Flush(); err != nil { - return nil, c.fatal(err) - } - - var deadline time.Time - if readTimeout != 0 { - deadline = time.Now().Add(readTimeout) - } - c.conn.SetReadDeadline(deadline) - - if cmd == "" { - reply := make([]interface{}, pending) - for i := range reply { - r, e := c.readReply() - if e != nil { - return nil, c.fatal(e) - } - reply[i] = r - } - return reply, nil - } - - var err error - var reply interface{} - for i := 0; i <= pending; i++ { - var e error - if reply, e = c.readReply(); e != nil { - return nil, c.fatal(e) - } - if e, ok := reply.(Error); ok && err == nil { - err = e - } - } - return reply, err -} diff --git a/vendor/github.com/gomodule/redigo/redis/doc.go b/vendor/github.com/gomodule/redigo/redis/doc.go deleted file mode 100644 index 69ad506c..00000000 --- a/vendor/github.com/gomodule/redigo/redis/doc.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package redis is a client for the Redis database. -// -// The Redigo FAQ (https://github.com/gomodule/redigo/wiki/FAQ) contains more -// documentation about this package. -// -// Connections -// -// The Conn interface is the primary interface for working with Redis. -// Applications create connections by calling the Dial, DialWithTimeout or -// NewConn functions. In the future, functions will be added for creating -// sharded and other types of connections. -// -// The application must call the connection Close method when the application -// is done with the connection. -// -// Executing Commands -// -// The Conn interface has a generic method for executing Redis commands: -// -// Do(commandName string, args ...interface{}) (reply interface{}, err error) -// -// The Redis command reference (http://redis.io/commands) lists the available -// commands. An example of using the Redis APPEND command is: -// -// n, err := conn.Do("APPEND", "key", "value") -// -// The Do method converts command arguments to bulk strings for transmission -// to the server as follows: -// -// Go Type Conversion -// []byte Sent as is -// string Sent as is -// int, int64 strconv.FormatInt(v) -// float64 strconv.FormatFloat(v, 'g', -1, 64) -// bool true -> "1", false -> "0" -// nil "" -// all other types fmt.Fprint(w, v) -// -// Redis command reply types are represented using the following Go types: -// -// Redis type Go type -// error redis.Error -// integer int64 -// simple string string -// bulk string []byte or nil if value not present. -// array []interface{} or nil if value not present. 
-// -// Use type assertions or the reply helper functions to convert from -// interface{} to the specific Go type for the command result. -// -// Pipelining -// -// Connections support pipelining using the Send, Flush and Receive methods. -// -// Send(commandName string, args ...interface{}) error -// Flush() error -// Receive() (reply interface{}, err error) -// -// Send writes the command to the connection's output buffer. Flush flushes the -// connection's output buffer to the server. Receive reads a single reply from -// the server. The following example shows a simple pipeline. -// -// c.Send("SET", "foo", "bar") -// c.Send("GET", "foo") -// c.Flush() -// c.Receive() // reply from SET -// v, err = c.Receive() // reply from GET -// -// The Do method combines the functionality of the Send, Flush and Receive -// methods. The Do method starts by writing the command and flushing the output -// buffer. Next, the Do method receives all pending replies including the reply -// for the command just sent by Do. If any of the received replies is an error, -// then Do returns the error. If there are no errors, then Do returns the last -// reply. If the command argument to the Do method is "", then the Do method -// will flush the output buffer and receive pending replies without sending a -// command. -// -// Use the Send and Do methods to implement pipelined transactions. -// -// c.Send("MULTI") -// c.Send("INCR", "foo") -// c.Send("INCR", "bar") -// r, err := c.Do("EXEC") -// fmt.Println(r) // prints [1, 1] -// -// Concurrency -// -// Connections support one concurrent caller to the Receive method and one -// concurrent caller to the Send and Flush methods. No other concurrency is -// supported including concurrent calls to the Do and Close methods. -// -// For full concurrent access to Redis, use the thread-safe Pool to get, use -// and release a connection from within a goroutine. Connections returned from -// a Pool have the concurrency restrictions described in the previous -// paragraph. -// -// Publish and Subscribe -// -// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. -// -// c.Send("SUBSCRIBE", "example") -// c.Flush() -// for { -// reply, err := c.Receive() -// if err != nil { -// return err -// } -// // process pushed message -// } -// -// The PubSubConn type wraps a Conn with convenience methods for implementing -// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods -// send and flush a subscription management command. The receive method -// converts a pushed message to convenient types for use in a type switch. -// -// psc := redis.PubSubConn{Conn: c} -// psc.Subscribe("example") -// for { -// switch v := psc.Receive().(type) { -// case redis.Message: -// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) -// case redis.Subscription: -// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) -// case error: -// return v -// } -// } -// -// Reply Helpers -// -// The Bool, Int, Bytes, String, Strings and Values functions convert a reply -// to a value of a specific type. To allow convenient wrapping of calls to the -// connection Do and Receive methods, the functions take a second argument of -// type error. If the error is non-nil, then the helper function returns the -// error. If the error is nil, the function converts the reply to the specified -// type: -// -// exists, err := redis.Bool(c.Do("EXISTS", "foo")) -// if err != nil { -// // handle error return from c.Do or type conversion error. 
-// } -// -// The Scan function converts elements of a array reply to Go types: -// -// var value1 int -// var value2 string -// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) -// if err != nil { -// // handle error -// } -// if _, err := redis.Scan(reply, &value1, &value2); err != nil { -// // handle error -// } -// -// Errors -// -// Connection methods return error replies from the server as type redis.Error. -// -// Call the connection Err() method to determine if the connection encountered -// non-recoverable error such as a network error or protocol parsing error. If -// Err() returns a non-nil value, then the connection is not usable and should -// be closed. -package redis diff --git a/vendor/github.com/gomodule/redigo/redis/go17.go b/vendor/github.com/gomodule/redigo/redis/go17.go deleted file mode 100644 index 5f363791..00000000 --- a/vendor/github.com/gomodule/redigo/redis/go17.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build go1.7,!go1.8 - -package redis - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, - Renegotiation: cfg.Renegotiation, - } -} diff --git a/vendor/github.com/gomodule/redigo/redis/go18.go b/vendor/github.com/gomodule/redigo/redis/go18.go deleted file mode 100644 index 558363be..00000000 --- a/vendor/github.com/gomodule/redigo/redis/go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.8 - -package redis - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - return cfg.Clone() -} diff --git a/vendor/github.com/gomodule/redigo/redis/log.go b/vendor/github.com/gomodule/redigo/redis/log.go deleted file mode 100644 index a06db9d6..00000000 --- a/vendor/github.com/gomodule/redigo/redis/log.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "fmt" - "log" - "time" -) - -var ( - _ ConnWithTimeout = (*loggingConn)(nil) -) - -// NewLoggingConn returns a logging wrapper around a connection. -func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { - if prefix != "" { - prefix = prefix + "." - } - return &loggingConn{conn, logger, prefix, nil} -} - -//NewLoggingConnFilter returns a logging wrapper around a connection and a filter function. 
-func NewLoggingConnFilter(conn Conn, logger *log.Logger, prefix string, skip func(cmdName string) bool) Conn { - if prefix != "" { - prefix = prefix + "." - } - return &loggingConn{conn, logger, prefix, skip} -} - -type loggingConn struct { - Conn - logger *log.Logger - prefix string - skip func(cmdName string) bool -} - -func (c *loggingConn) Close() error { - err := c.Conn.Close() - var buf bytes.Buffer - fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) - c.logger.Output(2, buf.String()) - return err -} - -func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { - const chop = 32 - switch v := v.(type) { - case []byte: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case string: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case []interface{}: - if len(v) == 0 { - buf.WriteString("[]") - } else { - sep := "[" - fin := "]" - if len(v) > chop { - v = v[:chop] - fin = "...]" - } - for _, vv := range v { - buf.WriteString(sep) - c.printValue(buf, vv) - sep = ", " - } - buf.WriteString(fin) - } - default: - fmt.Fprint(buf, v) - } -} - -func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { - if c.skip != nil && c.skip(commandName) { - return - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s%s(", c.prefix, method) - if method != "Receive" { - buf.WriteString(commandName) - for _, arg := range args { - buf.WriteString(", ") - c.printValue(&buf, arg) - } - } - buf.WriteString(") -> (") - if method != "Send" { - c.printValue(&buf, reply) - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%v)", err) - c.logger.Output(3, buf.String()) -} - -func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { - reply, err := c.Conn.Do(commandName, args...) - c.print("Do", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) { - reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...) - c.print("DoWithTimeout", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) Send(commandName string, args ...interface{}) error { - err := c.Conn.Send(commandName, args...) - c.print("Send", commandName, args, nil, err) - return err -} - -func (c *loggingConn) Receive() (interface{}, error) { - reply, err := c.Conn.Receive() - c.print("Receive", "", nil, reply, err) - return reply, err -} - -func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) { - reply, err := ReceiveWithTimeout(c.Conn, timeout) - c.print("ReceiveWithTimeout", "", nil, reply, err) - return reply, err -} diff --git a/vendor/github.com/gomodule/redigo/redis/pool.go b/vendor/github.com/gomodule/redigo/redis/pool.go deleted file mode 100644 index fef6bae5..00000000 --- a/vendor/github.com/gomodule/redigo/redis/pool.go +++ /dev/null @@ -1,635 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/sha1" - "errors" - "io" - "strconv" - "sync" - "sync/atomic" - "time" -) - -var ( - _ ConnWithTimeout = (*activeConn)(nil) - _ ConnWithTimeout = (*errorConn)(nil) -) - -var nowFunc = time.Now // for testing - -// ErrPoolExhausted is returned from a pool connection method (Do, Send, -// Receive, Flush, Err) when the maximum number of database connections in the -// pool has been reached. -var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") - -var ( - errPoolClosed = errors.New("redigo: connection pool closed") - errConnClosed = errors.New("redigo: connection closed") -) - -// Pool maintains a pool of connections. The application calls the Get method -// to get a connection from the pool and the connection's Close method to -// return the connection's resources to the pool. -// -// The following example shows how to use a pool in a web application. The -// application creates a pool at application startup and makes it available to -// request handlers using a package level variable. The pool configuration used -// here is an example, not a recommendation. -// -// func newPool(addr string) *redis.Pool { -// return &redis.Pool{ -// MaxIdle: 3, -// IdleTimeout: 240 * time.Second, -// // Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial. -// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) }, -// } -// } -// -// var ( -// pool *redis.Pool -// redisServer = flag.String("redisServer", ":6379", "") -// ) -// -// func main() { -// flag.Parse() -// pool = newPool(*redisServer) -// ... -// } -// -// A request handler gets a connection from the pool and closes the connection -// when the handler is done: -// -// func serveHome(w http.ResponseWriter, r *http.Request) { -// conn := pool.Get() -// defer conn.Close() -// ... -// } -// -// Use the Dial function to authenticate connections with the AUTH command or -// select a database with the SELECT command: -// -// pool := &redis.Pool{ -// // Other pool configuration not shown in this example. -// Dial: func () (redis.Conn, error) { -// c, err := redis.Dial("tcp", server) -// if err != nil { -// return nil, err -// } -// if _, err := c.Do("AUTH", password); err != nil { -// c.Close() -// return nil, err -// } -// if _, err := c.Do("SELECT", db); err != nil { -// c.Close() -// return nil, err -// } -// return c, nil -// }, -// } -// -// Use the TestOnBorrow function to check the health of an idle connection -// before the connection is returned to the application. This example PINGs -// connections that have been idle more than a minute: -// -// pool := &redis.Pool{ -// // Other pool configuration not shown in this example. -// TestOnBorrow: func(c redis.Conn, t time.Time) error { -// if time.Since(t) < time.Minute { -// return nil -// } -// _, err := c.Do("PING") -// return err -// }, -// } -// -type Pool struct { - // Dial is an application supplied function for creating and configuring a - // connection. - // - // The connection returned from Dial must not be in a special state - // (subscribed to pubsub channel, transaction started, ...). - Dial func() (Conn, error) - - // DialContext is an application supplied function for creating and configuring a - // connection with the given context. 
- // - // The connection returned from Dial must not be in a special state - // (subscribed to pubsub channel, transaction started, ...). - DialContext func(ctx context.Context) (Conn, error) - - // TestOnBorrow is an optional application supplied function for checking - // the health of an idle connection before the connection is used again by - // the application. Argument t is the time that the connection was returned - // to the pool. If the function returns an error, then the connection is - // closed. - TestOnBorrow func(c Conn, t time.Time) error - - // Maximum number of idle connections in the pool. - MaxIdle int - - // Maximum number of connections allocated by the pool at a given time. - // When zero, there is no limit on the number of connections in the pool. - MaxActive int - - // Close connections after remaining idle for this duration. If the value - // is zero, then idle connections are not closed. Applications should set - // the timeout to a value less than the server's timeout. - IdleTimeout time.Duration - - // If Wait is true and the pool is at the MaxActive limit, then Get() waits - // for a connection to be returned to the pool before returning. - Wait bool - - // Close connections older than this duration. If the value is zero, then - // the pool does not close connections based on age. - MaxConnLifetime time.Duration - - chInitialized uint32 // set to 1 when field ch is initialized - - mu sync.Mutex // mu protects the following fields - closed bool // set to true when the pool is closed. - active int // the number of open connections in the pool - ch chan struct{} // limits open connections when p.Wait is true - idle idleList // idle connections - waitCount int64 // total number of connections waited for. - waitDuration time.Duration // total time waited for new connections. -} - -// NewPool creates a new pool. -// -// Deprecated: Initialize the Pool directly as shown in the example. -func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { - return &Pool{Dial: newFn, MaxIdle: maxIdle} -} - -// Get gets a connection. The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. If there is an error -// getting an underlying connection, then the connection Err, Do, Send, Flush -// and Receive methods return that error. -func (p *Pool) Get() Conn { - // GetContext returns errorConn in the first argument when an error occurs. - c, _ := p.GetContext(context.Background()) - return c -} - -// GetContext gets a connection using the provided context. -// -// The provided Context must be non-nil. If the context expires before the -// connection is complete, an error is returned. Any expiration on the context -// will not affect the returned connection. -// -// If the function completes without error, then the application must close the -// returned connection. -func (p *Pool) GetContext(ctx context.Context) (Conn, error) { - // Wait until there is a vacant connection in the pool. - waited, err := p.waitVacantConn(ctx) - if err != nil { - return nil, err - } - - p.mu.Lock() - - if waited > 0 { - p.waitCount++ - p.waitDuration += waited - } - - // Prune stale connections at the back of the idle list. 
- if p.IdleTimeout > 0 { - n := p.idle.count - for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ { - pc := p.idle.back - p.idle.popBack() - p.mu.Unlock() - pc.c.Close() - p.mu.Lock() - p.active-- - } - } - - // Get idle connection from the front of idle list. - for p.idle.front != nil { - pc := p.idle.front - p.idle.popFront() - p.mu.Unlock() - if (p.TestOnBorrow == nil || p.TestOnBorrow(pc.c, pc.t) == nil) && - (p.MaxConnLifetime == 0 || nowFunc().Sub(pc.created) < p.MaxConnLifetime) { - return &activeConn{p: p, pc: pc}, nil - } - pc.c.Close() - p.mu.Lock() - p.active-- - } - - // Check for pool closed before dialing a new connection. - if p.closed { - p.mu.Unlock() - err := errors.New("redigo: get on closed pool") - return errorConn{err}, err - } - - // Handle limit for p.Wait == false. - if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive { - p.mu.Unlock() - return errorConn{ErrPoolExhausted}, ErrPoolExhausted - } - - p.active++ - p.mu.Unlock() - c, err := p.dial(ctx) - if err != nil { - c = nil - p.mu.Lock() - p.active-- - if p.ch != nil && !p.closed { - p.ch <- struct{}{} - } - p.mu.Unlock() - return errorConn{err}, err - } - return &activeConn{p: p, pc: &poolConn{c: c, created: nowFunc()}}, nil -} - -// PoolStats contains pool statistics. -type PoolStats struct { - // ActiveCount is the number of connections in the pool. The count includes - // idle connections and connections in use. - ActiveCount int - // IdleCount is the number of idle connections in the pool. - IdleCount int - - // WaitCount is the total number of connections waited for. - // This value is currently not guaranteed to be 100% accurate. - WaitCount int64 - - // WaitDuration is the total time blocked waiting for a new connection. - // This value is currently not guaranteed to be 100% accurate. - WaitDuration time.Duration -} - -// Stats returns pool's statistics. -func (p *Pool) Stats() PoolStats { - p.mu.Lock() - stats := PoolStats{ - ActiveCount: p.active, - IdleCount: p.idle.count, - WaitCount: p.waitCount, - WaitDuration: p.waitDuration, - } - p.mu.Unlock() - - return stats -} - -// ActiveCount returns the number of connections in the pool. The count -// includes idle connections and connections in use. -func (p *Pool) ActiveCount() int { - p.mu.Lock() - active := p.active - p.mu.Unlock() - return active -} - -// IdleCount returns the number of idle connections in the pool. -func (p *Pool) IdleCount() int { - p.mu.Lock() - idle := p.idle.count - p.mu.Unlock() - return idle -} - -// Close releases the resources used by the pool. -func (p *Pool) Close() error { - p.mu.Lock() - if p.closed { - p.mu.Unlock() - return nil - } - p.closed = true - p.active -= p.idle.count - pc := p.idle.front - p.idle.count = 0 - p.idle.front, p.idle.back = nil, nil - if p.ch != nil { - close(p.ch) - } - p.mu.Unlock() - for ; pc != nil; pc = pc.next { - pc.c.Close() - } - return nil -} - -func (p *Pool) lazyInit() { - // Fast path. - if atomic.LoadUint32(&p.chInitialized) == 1 { - return - } - // Slow path. - p.mu.Lock() - if p.chInitialized == 0 { - p.ch = make(chan struct{}, p.MaxActive) - if p.closed { - close(p.ch) - } else { - for i := 0; i < p.MaxActive; i++ { - p.ch <- struct{}{} - } - } - atomic.StoreUint32(&p.chInitialized, 1) - } - p.mu.Unlock() -} - -// waitVacantConn waits for a vacant connection in pool if waiting -// is enabled and pool size is limited, otherwise returns instantly. -// If ctx expires before that, an error is returned. 
-// -// If there were no vacant connection in the pool right away it returns the time spent waiting -// for that connection to appear in the pool. -func (p *Pool) waitVacantConn(ctx context.Context) (waited time.Duration, err error) { - if !p.Wait || p.MaxActive <= 0 { - // No wait or no connection limit. - return 0, nil - } - - p.lazyInit() - - // wait indicates if we believe it will block so its not 100% accurate - // however for stats it should be good enough. - wait := len(p.ch) == 0 - var start time.Time - if wait { - start = time.Now() - } - - if ctx == nil { - <-p.ch - } else { - select { - case <-p.ch: - // Additionally check that context hasn't expired while we were waiting, - // because `select` picks a random `case` if several of them are "ready". - select { - case <-ctx.Done(): - return 0, ctx.Err() - default: - } - case <-ctx.Done(): - return 0, ctx.Err() - } - } - - if wait { - return time.Since(start), nil - } - return 0, nil -} - -func (p *Pool) dial(ctx context.Context) (Conn, error) { - if p.DialContext != nil { - return p.DialContext(ctx) - } - if p.Dial != nil { - return p.Dial() - } - return nil, errors.New("redigo: must pass Dial or DialContext to pool") -} - -func (p *Pool) put(pc *poolConn, forceClose bool) error { - p.mu.Lock() - if !p.closed && !forceClose { - pc.t = nowFunc() - p.idle.pushFront(pc) - if p.idle.count > p.MaxIdle { - pc = p.idle.back - p.idle.popBack() - } else { - pc = nil - } - } - - if pc != nil { - p.mu.Unlock() - pc.c.Close() - p.mu.Lock() - p.active-- - } - - if p.ch != nil && !p.closed { - p.ch <- struct{}{} - } - p.mu.Unlock() - return nil -} - -type activeConn struct { - p *Pool - pc *poolConn - state int -} - -var ( - sentinel []byte - sentinelOnce sync.Once -) - -func initSentinel() { - p := make([]byte, 64) - if _, err := rand.Read(p); err == nil { - sentinel = p - } else { - h := sha1.New() - io.WriteString(h, "Oops, rand failed. Use time instead.") - io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) - sentinel = h.Sum(nil) - } -} - -func (ac *activeConn) Close() error { - pc := ac.pc - if pc == nil { - return nil - } - ac.pc = nil - - if ac.state&connectionMultiState != 0 { - pc.c.Send("DISCARD") - ac.state &^= (connectionMultiState | connectionWatchState) - } else if ac.state&connectionWatchState != 0 { - pc.c.Send("UNWATCH") - ac.state &^= connectionWatchState - } - if ac.state&connectionSubscribeState != 0 { - pc.c.Send("UNSUBSCRIBE") - pc.c.Send("PUNSUBSCRIBE") - // To detect the end of the message stream, ask the server to echo - // a sentinel value and read until we see that value. - sentinelOnce.Do(initSentinel) - pc.c.Send("ECHO", sentinel) - pc.c.Flush() - for { - p, err := pc.c.Receive() - if err != nil { - break - } - if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { - ac.state &^= connectionSubscribeState - break - } - } - } - pc.c.Do("") - ac.p.put(pc, ac.state != 0 || pc.c.Err() != nil) - return nil -} - -func (ac *activeConn) Err() error { - pc := ac.pc - if pc == nil { - return errConnClosed - } - return pc.c.Err() -} - -func (ac *activeConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - ci := lookupCommandInfo(commandName) - ac.state = (ac.state | ci.Set) &^ ci.Clear - return pc.c.Do(commandName, args...) 
-} - -func (ac *activeConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - cwt, ok := pc.c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - ci := lookupCommandInfo(commandName) - ac.state = (ac.state | ci.Set) &^ ci.Clear - return cwt.DoWithTimeout(timeout, commandName, args...) -} - -func (ac *activeConn) Send(commandName string, args ...interface{}) error { - pc := ac.pc - if pc == nil { - return errConnClosed - } - ci := lookupCommandInfo(commandName) - ac.state = (ac.state | ci.Set) &^ ci.Clear - return pc.c.Send(commandName, args...) -} - -func (ac *activeConn) Flush() error { - pc := ac.pc - if pc == nil { - return errConnClosed - } - return pc.c.Flush() -} - -func (ac *activeConn) Receive() (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - return pc.c.Receive() -} - -func (ac *activeConn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - cwt, ok := pc.c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.ReceiveWithTimeout(timeout) -} - -type errorConn struct{ err error } - -func (ec errorConn) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } -func (ec errorConn) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) { - return nil, ec.err -} -func (ec errorConn) Send(string, ...interface{}) error { return ec.err } -func (ec errorConn) Err() error { return ec.err } -func (ec errorConn) Close() error { return nil } -func (ec errorConn) Flush() error { return ec.err } -func (ec errorConn) Receive() (interface{}, error) { return nil, ec.err } -func (ec errorConn) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err } - -type idleList struct { - count int - front, back *poolConn -} - -type poolConn struct { - c Conn - t time.Time - created time.Time - next, prev *poolConn -} - -func (l *idleList) pushFront(pc *poolConn) { - pc.next = l.front - pc.prev = nil - if l.count == 0 { - l.back = pc - } else { - l.front.prev = pc - } - l.front = pc - l.count++ - return -} - -func (l *idleList) popFront() { - pc := l.front - l.count-- - if l.count == 0 { - l.front, l.back = nil, nil - } else { - pc.next.prev = nil - l.front = pc.next - } - pc.next, pc.prev = nil, nil -} - -func (l *idleList) popBack() { - pc := l.back - l.count-- - if l.count == 0 { - l.front, l.back = nil, nil - } else { - pc.prev.next = nil - l.back = pc.prev - } - pc.next, pc.prev = nil, nil -} diff --git a/vendor/github.com/gomodule/redigo/redis/pubsub.go b/vendor/github.com/gomodule/redigo/redis/pubsub.go deleted file mode 100644 index 2da60211..00000000 --- a/vendor/github.com/gomodule/redigo/redis/pubsub.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "errors" - "time" -) - -// Subscription represents a subscribe or unsubscribe notification. -type Subscription struct { - // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" - Kind string - - // The channel that was changed. - Channel string - - // The current number of subscriptions for connection. - Count int -} - -// Message represents a message notification. -type Message struct { - // The originating channel. - Channel string - - // The matched pattern, if any - Pattern string - - // The message data. - Data []byte -} - -// Pong represents a pubsub pong notification. -type Pong struct { - Data string -} - -// PubSubConn wraps a Conn with convenience methods for subscribers. -type PubSubConn struct { - Conn Conn -} - -// Close closes the connection. -func (c PubSubConn) Close() error { - return c.Conn.Close() -} - -// Subscribe subscribes the connection to the specified channels. -func (c PubSubConn) Subscribe(channel ...interface{}) error { - c.Conn.Send("SUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PSubscribe subscribes the connection to the given patterns. -func (c PubSubConn) PSubscribe(channel ...interface{}) error { - c.Conn.Send("PSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Unsubscribe unsubscribes the connection from the given channels, or from all -// of them if none is given. -func (c PubSubConn) Unsubscribe(channel ...interface{}) error { - c.Conn.Send("UNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PUnsubscribe unsubscribes the connection from the given patterns, or from all -// of them if none is given. -func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { - c.Conn.Send("PUNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Ping sends a PING to the server with the specified data. -// -// The connection must be subscribed to at least one channel or pattern when -// calling this method. -func (c PubSubConn) Ping(data string) error { - c.Conn.Send("PING", data) - return c.Conn.Flush() -} - -// Receive returns a pushed message as a Subscription, Message, Pong or error. -// The return value is intended to be used directly in a type switch as -// illustrated in the PubSubConn example. -func (c PubSubConn) Receive() interface{} { - return c.receiveInternal(c.Conn.Receive()) -} - -// ReceiveWithTimeout is like Receive, but it allows the application to -// override the connection's default timeout. 
-func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} { - return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout)) -} - -func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} { - reply, err := Values(replyArg, errArg) - if err != nil { - return err - } - - var kind string - reply, err = Scan(reply, &kind) - if err != nil { - return err - } - - switch kind { - case "message": - var m Message - if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { - return err - } - return m - case "pmessage": - var m Message - if _, err := Scan(reply, &m.Pattern, &m.Channel, &m.Data); err != nil { - return err - } - return m - case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": - s := Subscription{Kind: kind} - if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { - return err - } - return s - case "pong": - var p Pong - if _, err := Scan(reply, &p.Data); err != nil { - return err - } - return p - } - return errors.New("redigo: unknown pubsub notification") -} diff --git a/vendor/github.com/gomodule/redigo/redis/redis.go b/vendor/github.com/gomodule/redigo/redis/redis.go deleted file mode 100644 index e4464874..00000000 --- a/vendor/github.com/gomodule/redigo/redis/redis.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "time" -) - -// Error represents an error returned in a command reply. -type Error string - -func (err Error) Error() string { return string(err) } - -// Conn represents a connection to a Redis server. -type Conn interface { - // Close closes the connection. - Close() error - - // Err returns a non-nil value when the connection is not usable. - Err() error - - // Do sends a command to the server and returns the received reply. - Do(commandName string, args ...interface{}) (reply interface{}, err error) - - // Send writes the command to the client's output buffer. - Send(commandName string, args ...interface{}) error - - // Flush flushes the output buffer to the Redis server. - Flush() error - - // Receive receives a single reply from the Redis server - Receive() (reply interface{}, err error) -} - -// Argument is the interface implemented by an object which wants to control how -// the object is converted to Redis bulk strings. -type Argument interface { - // RedisArg returns a value to be encoded as a bulk string per the - // conversions listed in the section 'Executing Commands'. - // Implementations should typically return a []byte or string. - RedisArg() interface{} -} - -// Scanner is implemented by an object which wants to control its value is -// interpreted when read from Redis. -type Scanner interface { - // RedisScan assigns a value from a Redis value. The argument src is one of - // the reply types listed in the section `Executing Commands`. - // - // An error should be returned if the value cannot be stored without - // loss of information. 
- RedisScan(src interface{}) error -} - -// ConnWithTimeout is an optional interface that allows the caller to override -// a connection's default read timeout. This interface is useful for executing -// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the -// server. -// -// A connection's default read timeout is set with the DialReadTimeout dial -// option. Applications should rely on the default timeout for commands that do -// not block at the server. -// -// All of the Conn implementations in this package satisfy the ConnWithTimeout -// interface. -// -// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify -// use of this interface. -type ConnWithTimeout interface { - Conn - - // Do sends a command to the server and returns the received reply. - // The timeout overrides the read timeout set when dialing the - // connection. - DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) - - // Receive receives a single reply from the Redis server. The timeout - // overrides the read timeout set when dialing the connection. - ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) -} - -var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout") - -// DoWithTimeout executes a Redis command with the specified read timeout. If -// the connection does not satisfy the ConnWithTimeout interface, then an error -// is returned. -func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { - cwt, ok := c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.DoWithTimeout(timeout, cmd, args...) -} - -// ReceiveWithTimeout receives a reply with the specified read timeout. If the -// connection does not satisfy the ConnWithTimeout interface, then an error is -// returned. -func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) { - cwt, ok := c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.ReceiveWithTimeout(timeout) -} - -// SlowLog represents a redis SlowLog -type SlowLog struct { - // ID is a unique progressive identifier for every slow log entry. - ID int64 - - // Time is the unix timestamp at which the logged command was processed. - Time time.Time - - // ExecutationTime is the amount of time needed for the command execution. - ExecutionTime time.Duration - - // Args is the command name and arguments - Args []string - - // ClientAddr is the client IP address (4.0 only). - ClientAddr string - - // ClientName is the name set via the CLIENT SETNAME command (4.0 only). - ClientName string -} diff --git a/vendor/github.com/gomodule/redigo/redis/reply.go b/vendor/github.com/gomodule/redigo/redis/reply.go deleted file mode 100644 index 251a7884..00000000 --- a/vendor/github.com/gomodule/redigo/redis/reply.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "errors" - "fmt" - "strconv" - "time" -) - -// ErrNil indicates that a reply value is nil. -var ErrNil = errors.New("redigo: nil returned") - -// Int is a helper that converts a command reply to an integer. If err is not -// equal to nil, then Int returns 0, err. Otherwise, Int converts the -// reply to an int as follows: -// -// Reply type Result -// integer int(reply), nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int(reply interface{}, err error) (int, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - x := int(reply) - if int64(x) != reply { - return 0, strconv.ErrRange - } - return x, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 0) - return int(n), err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) -} - -// Int64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int64(reply interface{}, err error) (int64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - return reply, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) -} - -func errNegativeInt(v int64) error { - return fmt.Errorf("redigo: unexpected negative value %v for Uint64", v) -} - -// Uint64 is a helper that converts a command reply to 64 bit unsigned integer. -// If err is not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64 converts the -// reply to an uint64 as follows: -// -// Reply type Result -// +integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Uint64(reply interface{}, err error) (uint64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - if reply < 0 { - return 0, errNegativeInt(reply) - } - return uint64(reply), nil - case []byte: - n, err := strconv.ParseUint(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) -} - -// Float64 is a helper that converts a command reply to 64 bit float. If err is -// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts -// the reply to an int as follows: -// -// Reply type Result -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Float64(reply interface{}, err error) (float64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case []byte: - n, err := strconv.ParseFloat(string(reply), 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) -} - -// String is a helper that converts a command reply to a string. If err is not -// equal to nil, then String returns "", err. 
Otherwise String converts the -// reply to a string as follows: -// -// Reply type Result -// bulk string string(reply), nil -// simple string reply, nil -// nil "", ErrNil -// other "", error -func String(reply interface{}, err error) (string, error) { - if err != nil { - return "", err - } - switch reply := reply.(type) { - case []byte: - return string(reply), nil - case string: - return reply, nil - case nil: - return "", ErrNil - case Error: - return "", reply - } - return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) -} - -// Bytes is a helper that converts a command reply to a slice of bytes. If err -// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts -// the reply to a slice of bytes as follows: -// -// Reply type Result -// bulk string reply, nil -// simple string []byte(reply), nil -// nil nil, ErrNil -// other nil, error -func Bytes(reply interface{}, err error) ([]byte, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []byte: - return reply, nil - case string: - return []byte(reply), nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) -} - -// Bool is a helper that converts a command reply to a boolean. If err is not -// equal to nil, then Bool returns false, err. Otherwise Bool converts the -// reply to boolean as follows: -// -// Reply type Result -// integer value != 0, nil -// bulk string strconv.ParseBool(reply) -// nil false, ErrNil -// other false, error -func Bool(reply interface{}, err error) (bool, error) { - if err != nil { - return false, err - } - switch reply := reply.(type) { - case int64: - return reply != 0, nil - case []byte: - return strconv.ParseBool(string(reply)) - case nil: - return false, ErrNil - case Error: - return false, reply - } - return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) -} - -// MultiBulk is a helper that converts an array command reply to a []interface{}. -// -// Deprecated: Use Values instead. -func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } - -// Values is a helper that converts an array command reply to a []interface{}. -// If err is not equal to nil, then Values returns nil, err. Otherwise, Values -// converts the reply as follows: -// -// Reply type Result -// array reply, nil -// nil nil, ErrNil -// other nil, error -func Values(reply interface{}, err error) ([]interface{}, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - return reply, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) -} - -func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error { - if err != nil { - return err - } - switch reply := reply.(type) { - case []interface{}: - makeSlice(len(reply)) - for i := range reply { - if reply[i] == nil { - continue - } - if err := assign(i, reply[i]); err != nil { - return err - } - } - return nil - case nil: - return ErrNil - case Error: - return reply - } - return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply) -} - -// Float64s is a helper that converts an array command reply to a []float64. If -// err is not equal to nil, then Float64s returns nil, err. 
Nil array items are -// converted to 0 in the output slice. Floats64 returns an error if an array -// item is not a bulk string or nil. -func Float64s(reply interface{}, err error) ([]float64, error) { - var result []float64 - err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error { - p, ok := v.([]byte) - if !ok { - return fmt.Errorf("redigo: unexpected element type for Floats64, got type %T", v) - } - f, err := strconv.ParseFloat(string(p), 64) - result[i] = f - return err - }) - return result, err -} - -// Strings is a helper that converts an array command reply to a []string. If -// err is not equal to nil, then Strings returns nil, err. Nil array items are -// converted to "" in the output slice. Strings returns an error if an array -// item is not a bulk string or nil. -func Strings(reply interface{}, err error) ([]string, error) { - var result []string - err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case string: - result[i] = v - return nil - case []byte: - result[i] = string(v) - return nil - default: - return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v) - } - }) - return result, err -} - -// ByteSlices is a helper that converts an array command reply to a [][]byte. -// If err is not equal to nil, then ByteSlices returns nil, err. Nil array -// items are stay nil. ByteSlices returns an error if an array item is not a -// bulk string or nil. -func ByteSlices(reply interface{}, err error) ([][]byte, error) { - var result [][]byte - err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error { - p, ok := v.([]byte) - if !ok { - return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v) - } - result[i] = p - return nil - }) - return result, err -} - -// Int64s is a helper that converts an array command reply to a []int64. -// If err is not equal to nil, then Int64s returns nil, err. Nil array -// items are stay nil. Int64s returns an error if an array item is not a -// bulk string or nil. -func Int64s(reply interface{}, err error) ([]int64, error) { - var result []int64 - err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case int64: - result[i] = v - return nil - case []byte: - n, err := strconv.ParseInt(string(v), 10, 64) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v) - } - }) - return result, err -} - -// Ints is a helper that converts an array command reply to a []in. -// If err is not equal to nil, then Ints returns nil, err. Nil array -// items are stay nil. Ints returns an error if an array item is not a -// bulk string or nil. 
-func Ints(reply interface{}, err error) ([]int, error) { - var result []int - err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case int64: - n := int(v) - if int64(n) != v { - return strconv.ErrRange - } - result[i] = n - return nil - case []byte: - n, err := strconv.Atoi(string(v)) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v) - } - }) - return result, err -} - -// StringMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. -// Requires an even number of values in result. -func StringMap(result interface{}, err error) (map[string]string, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: StringMap expects even number of values result") - } - m := make(map[string]string, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, okKey := values[i].([]byte) - value, okValue := values[i+1].([]byte) - if !okKey || !okValue { - return nil, errors.New("redigo: StringMap key not a bulk string value") - } - m[string(key)] = string(value) - } - return m, nil -} - -// IntMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]int. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func IntMap(result interface{}, err error) (map[string]int, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: IntMap expects even number of values result") - } - m := make(map[string]int, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: IntMap key not a bulk string value") - } - value, err := Int(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// Int64Map is a helper that converts an array of strings (alternating key, value) -// into a map[string]int64. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func Int64Map(result interface{}, err error) (map[string]int64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: Int64Map expects even number of values result") - } - m := make(map[string]int64, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: Int64Map key not a bulk string value") - } - value, err := Int64(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// Positions is a helper that converts an array of positions (lat, long) -// into a [][2]float64. The GEOPOS command returns replies in this format. 
-func Positions(result interface{}, err error) ([]*[2]float64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - positions := make([]*[2]float64, len(values)) - for i := range values { - if values[i] == nil { - continue - } - p, ok := values[i].([]interface{}) - if !ok { - return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i]) - } - if len(p) != 2 { - return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p)) - } - lat, err := Float64(p[0], nil) - if err != nil { - return nil, err - } - long, err := Float64(p[1], nil) - if err != nil { - return nil, err - } - positions[i] = &[2]float64{lat, long} - } - return positions, nil -} - -// Uint64s is a helper that converts an array command reply to a []uint64. -// If err is not equal to nil, then Uint64s returns nil, err. Nil array -// items are stay nil. Uint64s returns an error if an array item is not a -// bulk string or nil. -func Uint64s(reply interface{}, err error) ([]uint64, error) { - var result []uint64 - err = sliceHelper(reply, err, "Uint64s", func(n int) { result = make([]uint64, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case uint64: - result[i] = v - return nil - case []byte: - n, err := strconv.ParseUint(string(v), 10, 64) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Uint64s, got type %T", v) - } - }) - return result, err -} - -// Uint64Map is a helper that converts an array of strings (alternating key, value) -// into a map[string]uint64. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func Uint64Map(result interface{}, err error) (map[string]uint64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: Uint64Map expects even number of values result") - } - m := make(map[string]uint64, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: Uint64Map key not a bulk string value") - } - value, err := Uint64(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// SlowLogs is a helper that parse the SLOWLOG GET command output and -// return the array of SlowLog -func SlowLogs(result interface{}, err error) ([]SlowLog, error) { - rawLogs, err := Values(result, err) - if err != nil { - return nil, err - } - logs := make([]SlowLog, len(rawLogs)) - for i, rawLog := range rawLogs { - rawLog, ok := rawLog.([]interface{}) - if !ok { - return nil, errors.New("redigo: slowlog element is not an array") - } - - var log SlowLog - - if len(rawLog) < 4 { - return nil, errors.New("redigo: slowlog element has less than four elements") - } - log.ID, ok = rawLog[0].(int64) - if !ok { - return nil, errors.New("redigo: slowlog element[0] not an int64") - } - timestamp, ok := rawLog[1].(int64) - if !ok { - return nil, errors.New("redigo: slowlog element[1] not an int64") - } - log.Time = time.Unix(timestamp, 0) - duration, ok := rawLog[2].(int64) - if !ok { - return nil, errors.New("redigo: slowlog element[2] not an int64") - } - log.ExecutionTime = time.Duration(duration) * time.Microsecond - - log.Args, err = Strings(rawLog[3], nil) - if err != nil { - return nil, fmt.Errorf("redigo: slowlog element[3] is not array of string. 
actual error is : %s", err.Error()) - } - if len(rawLog) >= 6 { - log.ClientAddr, err = String(rawLog[4], nil) - if err != nil { - return nil, fmt.Errorf("redigo: slowlog element[4] is not a string. actual error is : %s", err.Error()) - } - log.ClientName, err = String(rawLog[5], nil) - if err != nil { - return nil, fmt.Errorf("redigo: slowlog element[5] is not a string. actual error is : %s", err.Error()) - } - } - logs[i] = log - } - return logs, nil -} diff --git a/vendor/github.com/gomodule/redigo/redis/scan.go b/vendor/github.com/gomodule/redigo/redis/scan.go deleted file mode 100644 index 93d0c658..00000000 --- a/vendor/github.com/gomodule/redigo/redis/scan.go +++ /dev/null @@ -1,673 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "sync" -) - -var ( - scannerType = reflect.TypeOf((*Scanner)(nil)).Elem() -) - -func ensureLen(d reflect.Value, n int) { - if n > d.Cap() { - d.Set(reflect.MakeSlice(d.Type(), n, n)) - } else { - d.SetLen(n) - } -} - -func cannotConvert(d reflect.Value, s interface{}) error { - var sname string - switch s.(type) { - case string: - sname = "Redis simple string" - case Error: - sname = "Redis error" - case int64: - sname = "Redis integer" - case []byte: - sname = "Redis bulk string" - case []interface{}: - sname = "Redis array" - case nil: - sname = "Redis nil" - default: - sname = reflect.TypeOf(s).String() - } - return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) -} - -func convertAssignNil(d reflect.Value) (err error) { - switch d.Type().Kind() { - case reflect.Slice, reflect.Interface: - d.Set(reflect.Zero(d.Type())) - default: - err = cannotConvert(d, nil) - } - return err -} - -func convertAssignError(d reflect.Value, s Error) (err error) { - if d.Kind() == reflect.String { - d.SetString(string(s)) - } else if d.Kind() == reflect.Slice && d.Type().Elem().Kind() == reflect.Uint8 { - d.SetBytes([]byte(s)) - } else { - err = cannotConvert(d, s) - } - return -} - -func convertAssignString(d reflect.Value, s string) (err error) { - switch d.Type().Kind() { - case reflect.Float32, reflect.Float64: - var x float64 - x, err = strconv.ParseFloat(s, d.Type().Bits()) - d.SetFloat(x) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var x int64 - x, err = strconv.ParseInt(s, 10, d.Type().Bits()) - d.SetInt(x) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - var x uint64 - x, err = strconv.ParseUint(s, 10, d.Type().Bits()) - d.SetUint(x) - case reflect.Bool: - var x bool - x, err = strconv.ParseBool(s) - d.SetBool(x) - case reflect.String: - d.SetString(s) - case reflect.Slice: - if d.Type().Elem().Kind() == reflect.Uint8 { - d.SetBytes([]byte(s)) - } else { - err = cannotConvert(d, s) - } - case reflect.Ptr: - err = convertAssignString(d.Elem(), s) - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignBulkString(d reflect.Value, s 
[]byte) (err error) { - switch d.Type().Kind() { - case reflect.Slice: - // Handle []byte destination here to avoid unnecessary - // []byte -> string -> []byte converion. - if d.Type().Elem().Kind() == reflect.Uint8 { - d.SetBytes(s) - } else { - err = cannotConvert(d, s) - } - case reflect.Ptr: - if d.CanInterface() && d.CanSet() { - if s == nil { - if d.IsNil() { - return nil - } - - d.Set(reflect.Zero(d.Type())) - return nil - } - - if d.IsNil() { - d.Set(reflect.New(d.Type().Elem())) - } - - if sc, ok := d.Interface().(Scanner); ok { - return sc.RedisScan(s) - } - } - err = convertAssignString(d, string(s)) - default: - err = convertAssignString(d, string(s)) - } - return err -} - -func convertAssignInt(d reflect.Value, s int64) (err error) { - switch d.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.SetInt(s) - if d.Int() != s { - err = strconv.ErrRange - d.SetInt(0) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if s < 0 { - err = strconv.ErrRange - } else { - x := uint64(s) - d.SetUint(x) - if d.Uint() != x { - err = strconv.ErrRange - d.SetUint(0) - } - } - case reflect.Bool: - d.SetBool(s != 0) - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignValue(d reflect.Value, s interface{}) (err error) { - if d.Kind() != reflect.Ptr { - if d.CanAddr() { - d2 := d.Addr() - if d2.CanInterface() { - if scanner, ok := d2.Interface().(Scanner); ok { - return scanner.RedisScan(s) - } - } - } - } else if d.CanInterface() { - // Already a reflect.Ptr - if d.IsNil() { - d.Set(reflect.New(d.Type().Elem())) - } - if scanner, ok := d.Interface().(Scanner); ok { - return scanner.RedisScan(s) - } - } - - switch s := s.(type) { - case nil: - err = convertAssignNil(d) - case []byte: - err = convertAssignBulkString(d, s) - case int64: - err = convertAssignInt(d, s) - case string: - err = convertAssignString(d, s) - case Error: - err = convertAssignError(d, s) - default: - err = cannotConvert(d, s) - } - return err -} - -func convertAssignArray(d reflect.Value, s []interface{}) error { - if d.Type().Kind() != reflect.Slice { - return cannotConvert(d, s) - } - ensureLen(d, len(s)) - for i := 0; i < len(s); i++ { - if err := convertAssignValue(d.Index(i), s[i]); err != nil { - return err - } - } - return nil -} - -func convertAssign(d interface{}, s interface{}) (err error) { - if scanner, ok := d.(Scanner); ok { - return scanner.RedisScan(s) - } - - // Handle the most common destination types using type switches and - // fall back to reflection for all other types. 
- switch s := s.(type) { - case nil: - // ignore - case []byte: - switch d := d.(type) { - case *string: - *d = string(s) - case *int: - *d, err = strconv.Atoi(string(s)) - case *bool: - *d, err = strconv.ParseBool(string(s)) - case *[]byte: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignBulkString(d.Elem(), s) - } - } - case int64: - switch d := d.(type) { - case *int: - x := int(s) - if int64(x) != s { - err = strconv.ErrRange - x = 0 - } - *d = x - case *bool: - *d = s != 0 - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignInt(d.Elem(), s) - } - } - case string: - switch d := d.(type) { - case *string: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - case []interface{}: - switch d := d.(type) { - case *[]interface{}: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignArray(d.Elem(), s) - } - } - case Error: - err = s - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - return -} - -// Scan copies from src to the values pointed at by dest. -// -// Scan uses RedisScan if available otherwise: -// -// The values pointed at by dest must be an integer, float, boolean, string, -// []byte, interface{} or slices of these types. Scan uses the standard strconv -// package to convert bulk strings to numeric and boolean types. -// -// If a dest value is nil, then the corresponding src value is skipped. -// -// If a src element is nil, then the corresponding dest value is not modified. -// -// To enable easy use of Scan in a loop, Scan returns the slice of src -// following the copied values. -func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { - if len(src) < len(dest) { - return nil, errors.New("redigo.Scan: array short") - } - var err error - for i, d := range dest { - err = convertAssign(d, src[i]) - if err != nil { - err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) - break - } - } - return src[len(dest):], err -} - -type fieldSpec struct { - name string - index []int - omitEmpty bool -} - -type structSpec struct { - m map[string]*fieldSpec - l []*fieldSpec -} - -func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { - return ss.m[string(name)] -} - -func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - switch { - case f.PkgPath != "" && !f.Anonymous: - // Ignore unexported fields. - case f.Anonymous: - switch f.Type.Kind() { - case reflect.Struct: - compileStructSpec(f.Type, depth, append(index, i), ss) - case reflect.Ptr: - // TODO(steve): Protect against infinite recursion. 
- if f.Type.Elem().Kind() == reflect.Struct { - compileStructSpec(f.Type.Elem(), depth, append(index, i), ss) - } - } - default: - fs := &fieldSpec{name: f.Name} - tag := f.Tag.Get("redis") - p := strings.Split(tag, ",") - if len(p) > 0 { - if p[0] == "-" { - continue - } - if len(p[0]) > 0 { - fs.name = p[0] - } - for _, s := range p[1:] { - switch s { - case "omitempty": - fs.omitEmpty = true - default: - panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name())) - } - } - } - d, found := depth[fs.name] - if !found { - d = 1 << 30 - } - switch { - case len(index) == d: - // At same depth, remove from result. - delete(ss.m, fs.name) - j := 0 - for i := 0; i < len(ss.l); i++ { - if fs.name != ss.l[i].name { - ss.l[j] = ss.l[i] - j += 1 - } - } - ss.l = ss.l[:j] - case len(index) < d: - fs.index = make([]int, len(index)+1) - copy(fs.index, index) - fs.index[len(index)] = i - depth[fs.name] = len(index) - ss.m[fs.name] = fs - ss.l = append(ss.l, fs) - } - } - } -} - -var ( - structSpecMutex sync.RWMutex - structSpecCache = make(map[reflect.Type]*structSpec) - defaultFieldSpec = &fieldSpec{} -) - -func structSpecForType(t reflect.Type) *structSpec { - - structSpecMutex.RLock() - ss, found := structSpecCache[t] - structSpecMutex.RUnlock() - if found { - return ss - } - - structSpecMutex.Lock() - defer structSpecMutex.Unlock() - ss, found = structSpecCache[t] - if found { - return ss - } - - ss = &structSpec{m: make(map[string]*fieldSpec)} - compileStructSpec(t, make(map[string]int), nil, ss) - structSpecCache[t] = ss - return ss -} - -var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct") - -// ScanStruct scans alternating names and values from src to a struct. The -// HGETALL and CONFIG GET commands return replies in this format. -// -// ScanStruct uses exported field names to match values in the response. Use -// 'redis' field tag to override the name: -// -// Field int `redis:"myName"` -// -// Fields with the tag redis:"-" are ignored. -// -// Each field uses RedisScan if available otherwise: -// Integer, float, boolean, string and []byte fields are supported. Scan uses the -// standard strconv package to convert bulk string values to numeric and -// boolean types. -// -// If a src element is nil, then the corresponding field is not modified. -func ScanStruct(src []interface{}, dest interface{}) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanStructValue - } - d = d.Elem() - if d.Kind() != reflect.Struct { - return errScanStructValue - } - ss := structSpecForType(d.Type()) - - if len(src)%2 != 0 { - return errors.New("redigo.ScanStruct: number of values not a multiple of 2") - } - - for i := 0; i < len(src); i += 2 { - s := src[i+1] - if s == nil { - continue - } - name, ok := src[i].([]byte) - if !ok { - return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) - } - fs := ss.fieldSpec(name) - if fs == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err) - } - } - return nil -} - -var ( - errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct") -) - -// ScanSlice scans src to the slice pointed to by dest. 
-// -// If the target is a slice of types which implement Scanner then the custom -// RedisScan method is used otherwise the following rules apply: -// -// The elements in the dest slice must be integer, float, boolean, string, struct -// or pointer to struct values. -// -// Struct fields must be integer, float, boolean or string values. All struct -// fields are used unless a subset is specified using fieldNames. -func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanSliceValue - } - d = d.Elem() - if d.Kind() != reflect.Slice { - return errScanSliceValue - } - - isPtr := false - t := d.Type().Elem() - st := t - if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { - isPtr = true - t = t.Elem() - } - - if t.Kind() != reflect.Struct || st.Implements(scannerType) { - ensureLen(d, len(src)) - for i, s := range src { - if s == nil { - continue - } - if err := convertAssignValue(d.Index(i), s); err != nil { - return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) - } - } - return nil - } - - ss := structSpecForType(t) - fss := ss.l - if len(fieldNames) > 0 { - fss = make([]*fieldSpec, len(fieldNames)) - for i, name := range fieldNames { - fss[i] = ss.m[name] - if fss[i] == nil { - return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) - } - } - } - - if len(fss) == 0 { - return errors.New("redigo.ScanSlice: no struct fields") - } - - n := len(src) / len(fss) - if n*len(fss) != len(src) { - return errors.New("redigo.ScanSlice: length not a multiple of struct field count") - } - - ensureLen(d, n) - for i := 0; i < n; i++ { - d := d.Index(i) - if isPtr { - if d.IsNil() { - d.Set(reflect.New(t)) - } - d = d.Elem() - } - for j, fs := range fss { - s := src[i*len(fss)+j] - if s == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) - } - } - } - return nil -} - -// Args is a helper for constructing command arguments from structured values. -type Args []interface{} - -// Add returns the result of appending value to args. -func (args Args) Add(value ...interface{}) Args { - return append(args, value...) -} - -// AddFlat returns the result of appending the flattened value of v to args. -// -// Maps are flattened by appending the alternating keys and map values to args. -// -// Slices are flattened by appending the slice elements to args. -// -// Structs are flattened by appending the alternating names and values of -// exported fields to args. If v is a nil struct pointer, then nothing is -// appended. The 'redis' field tag overrides struct field names. See ScanStruct -// for more information on the use of the 'redis' field tag. -// -// Other types are appended to args as is. 
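A minimal sketch of the Args/AddFlat/ScanStruct round trip these comments describe; the book struct, its tags, the key and the server address are invented for illustration and are not part of this patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

// Example type; the `redis` tags override the field names used in the hash.
type book struct {
	Title  string `redis:"title"`
	Author string `redis:"author"`
	Pages  int    `redis:"pages"`
}

func main() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	in := book{Title: "Example", Author: "Anon", Pages: 123}

	// AddFlat flattens the struct into alternating field names and values.
	if _, err := conn.Do("HMSET", redis.Args{}.Add("book:1").AddFlat(&in)...); err != nil {
		log.Fatal(err)
	}

	// ScanStruct reads the alternating HGETALL reply back into a struct.
	values, err := redis.Values(conn.Do("HGETALL", "book:1"))
	if err != nil {
		log.Fatal(err)
	}
	var out book
	if err := redis.ScanStruct(values, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}
```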
-func (args Args) AddFlat(v interface{}) Args { - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Struct: - args = flattenStruct(args, rv) - case reflect.Slice: - for i := 0; i < rv.Len(); i++ { - args = append(args, rv.Index(i).Interface()) - } - case reflect.Map: - for _, k := range rv.MapKeys() { - args = append(args, k.Interface(), rv.MapIndex(k).Interface()) - } - case reflect.Ptr: - if rv.Type().Elem().Kind() == reflect.Struct { - if !rv.IsNil() { - args = flattenStruct(args, rv.Elem()) - } - } else { - args = append(args, v) - } - default: - args = append(args, v) - } - return args -} - -func flattenStruct(args Args, v reflect.Value) Args { - ss := structSpecForType(v.Type()) - for _, fs := range ss.l { - fv := v.FieldByIndex(fs.index) - if fs.omitEmpty { - var empty = false - switch fv.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - empty = fv.Len() == 0 - case reflect.Bool: - empty = !fv.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - empty = fv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - empty = fv.Uint() == 0 - case reflect.Float32, reflect.Float64: - empty = fv.Float() == 0 - case reflect.Interface, reflect.Ptr: - empty = fv.IsNil() - } - if empty { - continue - } - } - if arg, ok := fv.Interface().(Argument); ok { - args = append(args, fs.name, arg.RedisArg()) - } else if fv.Kind() == reflect.Ptr { - if !fv.IsNil() { - args = append(args, fs.name, fv.Elem().Interface()) - } - } else { - args = append(args, fs.name, fv.Interface()) - } - } - return args -} diff --git a/vendor/github.com/gomodule/redigo/redis/script.go b/vendor/github.com/gomodule/redigo/redis/script.go deleted file mode 100644 index 0ef1c821..00000000 --- a/vendor/github.com/gomodule/redigo/redis/script.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "strings" -) - -// Script encapsulates the source, hash and key count for a Lua script. See -// http://redis.io/commands/eval for information on scripts in Redis. -type Script struct { - keyCount int - src string - hash string -} - -// NewScript returns a new script object. If keyCount is greater than or equal -// to zero, then the count is automatically inserted in the EVAL command -// argument list. If keyCount is less than zero, then the application supplies -// the count as the first value in the keysAndArgs argument to the Do, Send and -// SendHash methods. 
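A hedged sketch of how the Script helper described above is normally used; the Lua body, key name, increment and server address are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

// keyCount = 1, so the key count is inserted into the EVAL/EVALSHA
// argument list automatically. Script body and key are illustrative.
var incrBy = redis.NewScript(1, `return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Do evaluates via EVALSHA and falls back to EVAL (loading the script)
	// if the server does not have it cached yet.
	n, err := redis.Int64(incrBy.Do(conn, "counter:example", 5))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new value:", n)
}
```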
-func NewScript(keyCount int, src string) *Script { - h := sha1.New() - io.WriteString(h, src) - return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} -} - -func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { - var args []interface{} - if s.keyCount < 0 { - args = make([]interface{}, 1+len(keysAndArgs)) - args[0] = spec - copy(args[1:], keysAndArgs) - } else { - args = make([]interface{}, 2+len(keysAndArgs)) - args[0] = spec - args[1] = s.keyCount - copy(args[2:], keysAndArgs) - } - return args -} - -// Hash returns the script hash. -func (s *Script) Hash() string { - return s.hash -} - -// Do evaluates the script. Under the covers, Do optimistically evaluates the -// script using the EVALSHA command. If the command fails because the script is -// not loaded, then Do evaluates the script using the EVAL command (thus -// causing the script to load). -func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { - v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) - if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { - v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) - } - return v, err -} - -// SendHash evaluates the script without waiting for the reply. The script is -// evaluated with the EVALSHA command. The application must ensure that the -// script is loaded by a previous call to Send, Do or Load methods. -func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) -} - -// Send evaluates the script without waiting for the reply. -func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVAL", s.args(s.src, keysAndArgs)...) -} - -// Load loads the script without evaluating it. 
-func (s *Script) Load(c Conn) error { - _, err := c.Do("SCRIPT", "LOAD", s.src) - return err -} diff --git a/vendor/github.com/hashicorp/hcl/go.mod b/vendor/github.com/hashicorp/hcl/go.mod deleted file mode 100644 index 4debbbe3..00000000 --- a/vendor/github.com/hashicorp/hcl/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/hashicorp/hcl - -require github.com/davecgh/go-spew v1.1.1 diff --git a/vendor/github.com/hashicorp/hcl/go.sum b/vendor/github.com/hashicorp/hcl/go.sum deleted file mode 100644 index b5e2922e..00000000 --- a/vendor/github.com/hashicorp/hcl/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/vendor/github.com/jmoiron/sqlx/go.mod b/vendor/github.com/jmoiron/sqlx/go.mod deleted file mode 100644 index 66c67561..00000000 --- a/vendor/github.com/jmoiron/sqlx/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/jmoiron/sqlx - -require ( - github.com/go-sql-driver/mysql v1.4.0 - github.com/lib/pq v1.0.0 - github.com/mattn/go-sqlite3 v1.9.0 -) diff --git a/vendor/github.com/jmoiron/sqlx/go.sum b/vendor/github.com/jmoiron/sqlx/go.sum deleted file mode 100644 index a3239ada..00000000 --- a/vendor/github.com/jmoiron/sqlx/go.sum +++ /dev/null @@ -1,6 +0,0 @@ -github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= diff --git a/vendor/github.com/josharian/intern/go.mod b/vendor/github.com/josharian/intern/go.mod deleted file mode 100644 index f2262ff0..00000000 --- a/vendor/github.com/josharian/intern/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/josharian/intern - -go 1.5 diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 52b111d5..c589addf 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,8 +8,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - # Benchmark ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod deleted file mode 100644 index e05c42ff..00000000 --- a/vendor/github.com/json-iterator/go/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/json-iterator/go - -go 1.12 - -require ( - github.com/davecgh/go-spew v1.1.1 - github.com/google/gofuzz v1.0.0 - github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum deleted file mode 100644 index be00a6df..00000000 --- a/vendor/github.com/json-iterator/go/go.sum +++ /dev/null @@ -1,15 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/leodido/go-urn/go.mod b/vendor/github.com/leodido/go-urn/go.mod deleted file mode 100644 index 98cf196d..00000000 --- a/vendor/github.com/leodido/go-urn/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/leodido/go-urn - -go 1.13 - -require github.com/stretchr/testify v1.6.1 diff --git a/vendor/github.com/leodido/go-urn/go.sum b/vendor/github.com/leodido/go-urn/go.sum deleted file mode 100644 index afe7890c..00000000 --- a/vendor/github.com/leodido/go-urn/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/magiconair/properties/go.mod b/vendor/github.com/magiconair/properties/go.mod deleted file mode 100644 index 4ff090bd..00000000 --- a/vendor/github.com/magiconair/properties/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/magiconair/properties - -go 1.13 diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml deleted file mode 100644 index 604314dd..00000000 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... 
- -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod deleted file mode 100644 index 605c4c22..00000000 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/mattn/go-isatty - -go 1.12 - -require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum deleted file mode 100644 index 912e29cb..00000000 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 711f2880..39bbcf00 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,3 +1,4 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine // +build darwin freebsd openbsd netbsd dragonfly // +build !appengine diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index 3eba4cb3..31503226 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,3 +1,4 @@ +//go:build appengine || js || nacl || wasm // +build appengine js nacl wasm package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go index c5b6e0c0..bae7f9bb 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -1,3 +1,4 @@ +//go:build plan9 // +build plan9 package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go index 30106707..0c3acf2d 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -1,5 +1,5 @@ -// +build solaris -// +build !appengine +//go:build solaris && !appengine +// +build solaris,!appengine package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 4e7b850e..67787657 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,3 +1,4 @@ +//go:build (linux || aix || zos) && !appengine // +build linux aix zos // +build !appengine diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go index 1fa86915..8e3c9917 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package isatty @@ -76,7 +76,7 @@ func isCygwinPipeName(name string) bool { } // getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler -// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion // guys are using Windows XP, this 
is a workaround for those guys, it will also work on system from // Windows vista to 10 // see https://stackoverflow.com/a/18792477 for details diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 1955f287..38a09916 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,6 +1,16 @@ -## unreleased +## 1.4.3 -* Fix regression where `*time.Time` value would be set to empty and not be sent +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent to decode hooks properly [GH-232] ## 1.4.0 diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 92e6f76f..4d4bbc73 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -62,7 +62,8 @@ func DecodeHookExec( func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error - var data interface{} + data := f.Interface() + newFrom := f for _, f1 := range fs { data, err = DecodeHookExec(f1, newFrom, t) diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod deleted file mode 100644 index a03ae973..00000000 --- a/vendor/github.com/mitchellh/mapstructure/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/mitchellh/mapstructure - -go 1.14 diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 3643901f..6b81b006 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -192,7 +192,7 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) -// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target // values. type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) @@ -258,6 +258,11 @@ type DecoderConfig struct { // The tag name that mapstructure reads for field names. This // defaults to "mapstructure" TagName string + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. 
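For context, a minimal sketch of how the MatchName hook introduced by this vendored version can be wired up; the Settings type, the map keys and the strict-equality matcher are invented stand-ins for whatever a caller actually needs:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Settings struct {
	MaxConns int
	Timeout  int
}

func main() {
	input := map[string]interface{}{
		"MaxConns": 10,
		"timeout":  30, // deliberately wrong case
	}

	var out Settings
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &out,
		// Exact, case-sensitive matching instead of the default strings.EqualFold.
		MatchName: func(mapKey, fieldName string) bool {
			return mapKey == fieldName
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := decoder.Decode(input); err != nil {
		log.Fatal(err)
	}

	// "timeout" no longer matches the Timeout field, so it stays zero.
	fmt.Printf("%+v\n", out) // {MaxConns:10 Timeout:0}
}
```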
+ MatchName func(mapKey, fieldName string) bool } // A Decoder takes a raw interface value and turns it into structured @@ -376,6 +381,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.TagName = "mapstructure" } + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + result := &Decoder{ config: config, } @@ -675,16 +684,12 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e } case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": jn := data.(json.Number) - i, err := jn.Int64() + i, err := strconv.ParseUint(string(jn), 0, 64) if err != nil { return fmt.Errorf( "error decoding json.Number into %s: %s", name, err) } - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) + val.SetUint(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", @@ -1340,7 +1345,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } - if strings.EqualFold(mK, fieldName) { + if d.config.MatchName(mK, fieldName) { rawMapKey = dataValKey rawMapVal = dataVal.MapIndex(dataValKey) break diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml index fbb43744..b097728d 100644 --- a/vendor/github.com/modern-go/reflect2/.travis.yml +++ b/vendor/github.com/modern-go/reflect2/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.8.x + - 1.9.x - 1.x before_install: diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock index 2a3a6989..10ef8111 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.lock +++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock @@ -1,15 +1,9 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7" + input-imports = [] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml index 2f4f4dbd..a9bc5061 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -26,10 +26,6 @@ ignored = [] -[[constraint]] - name = "github.com/modern-go/concurrent" - version = "1.0.0" - [prune] go-tests = true unused-packages = true diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go new file mode 100644 index 00000000..2b4116f6 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_118.go @@ -0,0 +1,23 @@ +//+build go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + var it hiter + mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it) + return &UnsafeMapIterator{ + hiter: &it, + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go deleted file mode 100644 index 5c1cea86..00000000 --- a/vendor/github.com/modern-go/reflect2/go_above_17.go +++ /dev/null @@ -1,8 +0,0 @@ -//+build go1.7 - -package reflect2 - -import "unsafe" - -//go:linkname resolveTypeOff reflect.resolveTypeOff -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go index c7e3b780..974f7685 100644 --- a/vendor/github.com/modern-go/reflect2/go_above_19.go +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -6,6 +6,9 @@ import ( "unsafe" ) +//go:linkname resolveTypeOff reflect.resolveTypeOff +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + //go:linkname makemap reflect.makemap func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer) diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go new file mode 100644 index 00000000..00003dbd --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_118.go @@ -0,0 +1,21 @@ +//+build !go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + return &UnsafeMapIterator{ + hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go deleted file mode 100644 index 65a93c88..00000000 --- a/vendor/github.com/modern-go/reflect2/go_below_17.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build !go1.7 - -package reflect2 - -import "unsafe" - -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { - return nil -} diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go deleted file mode 100644 index b050ef70..00000000 --- a/vendor/github.com/modern-go/reflect2/go_below_19.go +++ /dev/null @@ -1,14 +0,0 @@ -//+build !go1.9 - -package reflect2 - -import ( - "unsafe" -) - -//go:linkname makemap reflect.makemap -func makemap(rtype unsafe.Pointer) (m unsafe.Pointer) - -func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { - return makemap(rtype) -} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go index 63b49c79..c43c8b9d 100644 --- a/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -1,8 +1,9 @@ package reflect2 import ( - "github.com/modern-go/concurrent" "reflect" + "runtime" + "sync" "unsafe" ) @@ -130,13 +131,13 @@ var ConfigSafe = Config{UseSafeImplementation: true}.Froze() type frozenConfig struct { useSafeImplementation bool - cache *concurrent.Map + cache *sync.Map } func (cfg Config) Froze() *frozenConfig { return &frozenConfig{ useSafeImplementation: cfg.UseSafeImplementation, - cache: concurrent.NewMap(), + cache: new(sync.Map), } } @@ -288,11 +289,12 @@ func NoEscape(p unsafe.Pointer) unsafe.Pointer { } func UnsafeCastString(str string) []byte { + bytes := make([]byte, 0) stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str)) - sliceHeader := &reflect.SliceHeader{ - Data: stringHeader.Data, - Cap: stringHeader.Len, - Len: stringHeader.Len, - } - return *(*[]byte)(unsafe.Pointer(sliceHeader)) + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes)) + sliceHeader.Data = stringHeader.Data + sliceHeader.Cap = stringHeader.Len + sliceHeader.Len = stringHeader.Len + runtime.KeepAlive(str) + return bytes } diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh deleted file mode 100644 index 3d2b9768..00000000 --- a/vendor/github.com/modern-go/reflect2/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list github.com/modern-go/reflect2-tests/... 
| grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 3acfb558..4b13c315 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -1,17 +1,13 @@ +// +build !gccgo + package reflect2 import ( "reflect" - "runtime" - "strings" "sync" "unsafe" ) -// typelinks1 for 1.5 ~ 1.6 -//go:linkname typelinks1 reflect.typelinks -func typelinks1() [][]unsafe.Pointer - // typelinks2 for 1.7 ~ //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) @@ -27,49 +23,10 @@ func discoverTypes() { types = make(map[string]reflect.Type) packages = make(map[string]map[string]reflect.Type) - ver := runtime.Version() - if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { - loadGo15Types() - } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { - loadGo15Types() - } else { - loadGo17Types() - } -} - -func loadGo15Types() { - var obj interface{} = reflect.TypeOf(0) - typePtrss := typelinks1() - for _, typePtrs := range typePtrss { - for _, typePtr := range typePtrs { - (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr - typ := obj.(reflect.Type) - if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { - loadedType := typ.Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && - typ.Elem().Elem().Kind() == reflect.Struct { - loadedType := typ.Elem().Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - } - } + loadGoTypes() } -func loadGo17Types() { +func loadGoTypes() { var obj interface{} = reflect.TypeOf(0) sections, offset := typelinks2() for i, offs := range offset { diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go index 57229c8d..b49f614e 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_link.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -19,18 +19,12 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int //go:linkname mapassign reflect.mapassign //go:noescape -func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) +func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer) //go:linkname mapaccess reflect.mapaccess //go:noescape func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) -// m escapes into the return value, but the caller of mapiterinit -// doesn't let the return value escape. 
-//go:noescape -//go:linkname mapiterinit reflect.mapiterinit -func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter - //go:noescape //go:linkname mapiternext reflect.mapiternext func mapiternext(it *hiter) @@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate // the layout of this structure. type hiter struct { - key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go). - value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). - // rest fields are ignored + key unsafe.Pointer + value unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow *[]unsafe.Pointer + oldoverflow *[]unsafe.Pointer + startBucket uintptr + offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr } // add returns p+x. diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go index f2e76e6b..37872da8 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_map.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go @@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { return type2.UnsafeIterate(objEFace.data) } -func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { - return &UnsafeMapIterator{ - hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), - pKeyRType: type2.pKeyRType, - pElemRType: type2.pElemRType, - } -} - type UnsafeMapIterator struct { *hiter pKeyRType unsafe.Pointer diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore deleted file mode 100644 index 35d9351d..00000000 --- a/vendor/github.com/nxadm/tail/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.idea/ -.test/ -examples/_* \ No newline at end of file diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md deleted file mode 100644 index 224e54b4..00000000 --- a/vendor/github.com/nxadm/tail/CHANGES.md +++ /dev/null @@ -1,56 +0,0 @@ -# Version v1.4.7-v1.4.8 -* Documentation updates. -* Small linter cleanups. -* Added example in test. - -# Version v1.4.6 - -* Document the usage of Cleanup when re-reading a file (thanks to @lesovsky) for issue #18. -* Add example directories with example and tests for issues. - -# Version v1.4.4-v1.4.5 - -* Fix of checksum problem because of forced tag. No changes to the code. - -# Version v1.4.1 - -* Incorporated PR 162 by by Mohammed902: "Simplify non-Windows build tag". - -# Version v1.4.0 - -* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail". - -# Version v1.3.1 - -* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer", -fixes upstream issue 93. - - -# Version v1.3.0 - -* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num -to Line struct". - -# Version v1.2.1 - -* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able -code in readme". -* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change -to comment wording". -* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed -spurious newlines from log messages". - -# Version v1.2.0 - -* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the - problem for never return the last line if it's not followed by a newline". 
-* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove -deprecated os.SEEK consts". The changes bumped the minimal supported Go -release to 1.9. - -# Version v1.1.0 - -* migration to go modules. -* release of master branch of the dormant upstream, because it contains -fixes and improvement no present in the tagged release. - diff --git a/vendor/github.com/nxadm/tail/Dockerfile b/vendor/github.com/nxadm/tail/Dockerfile deleted file mode 100644 index d9633891..00000000 --- a/vendor/github.com/nxadm/tail/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM golang - -RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/ -ADD . $GOPATH/src/github.com/nxadm/tail/ - -# expecting to fetch dependencies successfully. -RUN go get -v github.com/nxadm/tail - -# expecting to run the test successfully. -RUN go test -v github.com/nxadm/tail - -# expecting to install successfully -RUN go install -v github.com/nxadm/tail -RUN go install -v github.com/nxadm/tail/cmd/gotail - -RUN $GOPATH/bin/gotail -h || true - -ENV PATH $GOPATH/bin:$PATH -CMD ["gotail"] diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md deleted file mode 100644 index f47939c7..00000000 --- a/vendor/github.com/nxadm/tail/README.md +++ /dev/null @@ -1,44 +0,0 @@ -![ci](https://github.com/nxadm/tail/workflows/ci/badge.svg)[![Go Reference](https://pkg.go.dev/badge/github.com/nxadm/tail.svg)](https://pkg.go.dev/github.com/nxadm/tail) - -# tail functionality in Go - -nxadm/tail provides a Go library that emulates the features of the BSD `tail` -program. The library comes with full support for truncation/move detection as -it is designed to work with log rotation tools. The library works on all -operating systems supported by Go, including POSIX systems like Linux and -*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported. - -A simple example: - -```Go -// Create a tail -t, err := tail.TailFile( - "/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true}) -if err != nil { - panic(err) -} - -// Print the text of each received line -for line := range t.Lines { - fmt.Println(line.Text) -} -``` - -See [API documentation](https://pkg.go.dev/github.com/nxadm/tail). - -## Installing - - go get github.com/nxadm/tail/... - -## History - -This project is an active, drop-in replacement for the -[abandoned](https://en.wikipedia.org/wiki/HPE_Helion) Go tail library at -[hpcloud](https://github.com/hpcloud/tail). Next to -[addressing open issues/PRs of the original project](https://github.com/nxadm/tail/issues/6), -nxadm/tail continues the development by keeping up to date with the Go toolchain -(e.g. go modules) and dependencies, completing the documentation, adding features -and fixing bugs. - -## Examples -Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples). 
\ No newline at end of file diff --git a/vendor/github.com/nxadm/tail/go.mod b/vendor/github.com/nxadm/tail/go.mod deleted file mode 100644 index 5de9a606..00000000 --- a/vendor/github.com/nxadm/tail/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/nxadm/tail - -go 1.13 - -require ( - github.com/fsnotify/fsnotify v1.4.9 - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 -) diff --git a/vendor/github.com/nxadm/tail/go.sum b/vendor/github.com/nxadm/tail/go.sum deleted file mode 100644 index 3485daed..00000000 --- a/vendor/github.com/nxadm/tail/go.sum +++ /dev/null @@ -1,6 +0,0 @@ -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= diff --git a/vendor/github.com/nxadm/tail/ratelimiter/Licence b/vendor/github.com/nxadm/tail/ratelimiter/Licence deleted file mode 100644 index 434aab19..00000000 --- a/vendor/github.com/nxadm/tail/ratelimiter/Licence +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (C) 2013 99designs - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go deleted file mode 100644 index 358b69e7..00000000 --- a/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends. 
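A short sketch of the leaky-bucket API defined in the file below; the capacity, leak interval and request loop are arbitrary illustration values, not anything used by this project:

```go
package main

import (
	"fmt"
	"time"

	"github.com/nxadm/tail/ratelimiter"
)

func main() {
	// A bucket of capacity 10 that leaks one unit every 100ms.
	bucket := ratelimiter.NewLeakyBucket(10, 100*time.Millisecond)

	for i := 0; i < 12; i++ {
		if bucket.Pour(1) {
			fmt.Println("request", i, "allowed")
		} else {
			fmt.Println("request", i, "throttled; fully drained in", bucket.TimeToDrain())
		}
	}
}
```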
-package ratelimiter - -import ( - "time" -) - -type LeakyBucket struct { - Size uint16 - Fill float64 - LeakInterval time.Duration // time.Duration for 1 unit of size to leak - Lastupdate time.Time - Now func() time.Time -} - -func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket { - bucket := LeakyBucket{ - Size: size, - Fill: 0, - LeakInterval: leakInterval, - Now: time.Now, - Lastupdate: time.Now(), - } - - return &bucket -} - -func (b *LeakyBucket) updateFill() { - now := b.Now() - if b.Fill > 0 { - elapsed := now.Sub(b.Lastupdate) - - b.Fill -= float64(elapsed) / float64(b.LeakInterval) - if b.Fill < 0 { - b.Fill = 0 - } - } - b.Lastupdate = now -} - -func (b *LeakyBucket) Pour(amount uint16) bool { - b.updateFill() - - var newfill float64 = b.Fill + float64(amount) - - if newfill > float64(b.Size) { - return false - } - - b.Fill = newfill - - return true -} - -// The time at which this bucket will be completely drained -func (b *LeakyBucket) DrainedAt() time.Time { - return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval))) -} - -// The duration until this bucket is completely drained -func (b *LeakyBucket) TimeToDrain() time.Duration { - return b.DrainedAt().Sub(b.Now()) -} - -func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration { - return b.Now().Sub(b.Lastupdate) -} - -type LeakyBucketSer struct { - Size uint16 - Fill float64 - LeakInterval time.Duration // time.Duration for 1 unit of size to leak - Lastupdate time.Time -} - -func (b *LeakyBucket) Serialise() *LeakyBucketSer { - bucket := LeakyBucketSer{ - Size: b.Size, - Fill: b.Fill, - LeakInterval: b.LeakInterval, - Lastupdate: b.Lastupdate, - } - - return &bucket -} - -func (b *LeakyBucketSer) DeSerialise() *LeakyBucket { - bucket := LeakyBucket{ - Size: b.Size, - Fill: b.Fill, - LeakInterval: b.LeakInterval, - Lastupdate: b.Lastupdate, - Now: time.Now, - } - - return &bucket -} diff --git a/vendor/github.com/nxadm/tail/ratelimiter/memory.go b/vendor/github.com/nxadm/tail/ratelimiter/memory.go deleted file mode 100644 index bf3c2131..00000000 --- a/vendor/github.com/nxadm/tail/ratelimiter/memory.go +++ /dev/null @@ -1,60 +0,0 @@ -package ratelimiter - -import ( - "errors" - "time" -) - -const ( - GC_SIZE int = 100 - GC_PERIOD time.Duration = 60 * time.Second -) - -type Memory struct { - store map[string]LeakyBucket - lastGCCollected time.Time -} - -func NewMemory() *Memory { - m := new(Memory) - m.store = make(map[string]LeakyBucket) - m.lastGCCollected = time.Now() - return m -} - -func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) { - - bucket, ok := m.store[key] - if !ok { - return nil, errors.New("miss") - } - - return &bucket, nil -} - -func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error { - - if len(m.store) > GC_SIZE { - m.GarbageCollect() - } - - m.store[key] = bucket - - return nil -} - -func (m *Memory) GarbageCollect() { - now := time.Now() - - // rate limit GC to once per minute - if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() { - for key, bucket := range m.store { - // if the bucket is drained, then GC - if bucket.DrainedAt().Unix() < now.Unix() { - delete(m.store, key) - } - } - - m.lastGCCollected = now - } -} diff --git a/vendor/github.com/nxadm/tail/ratelimiter/storage.go b/vendor/github.com/nxadm/tail/ratelimiter/storage.go deleted file mode 100644 index 89b2fe88..00000000 --- a/vendor/github.com/nxadm/tail/ratelimiter/storage.go +++ /dev/null @@ -1,6 +0,0 @@ -package ratelimiter - -type Storage interface { - GetBucketFor(string) 
(*LeakyBucket, error) - SetBucketFor(string, LeakyBucket) error -} diff --git a/vendor/github.com/nxadm/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go deleted file mode 100644 index 37ea4411..00000000 --- a/vendor/github.com/nxadm/tail/tail.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// Copyright (c) 2015 HPE Software Inc. All rights reserved. -// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. - -//nxadm/tail provides a Go library that emulates the features of the BSD `tail` -//program. The library comes with full support for truncation/move detection as -//it is designed to work with log rotation tools. The library works on all -//operating systems supported by Go, including POSIX systems like Linux and -//*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported. -package tail - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "strings" - "sync" - "time" - - "github.com/nxadm/tail/ratelimiter" - "github.com/nxadm/tail/util" - "github.com/nxadm/tail/watch" - "gopkg.in/tomb.v1" -) - -var ( - // ErrStop is returned when the tail of a file has been marked to be stopped. - ErrStop = errors.New("tail should now stop") -) - -type Line struct { - Text string // The contents of the file - Num int // The line number - SeekInfo SeekInfo // SeekInfo - Time time.Time // Present time - Err error // Error from tail -} - -// Deprecated: this function is no longer used internally and it has little of no -// use in the API. As such, it will be removed from the API in a future major -// release. -// -// NewLine returns a * pointer to a Line struct. -func NewLine(text string, lineNum int) *Line { - return &Line{text, lineNum, SeekInfo{}, time.Now(), nil} -} - -// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek -type SeekInfo struct { - Offset int64 - Whence int -} - -type logger interface { - Fatal(v ...interface{}) - Fatalf(format string, v ...interface{}) - Fatalln(v ...interface{}) - Panic(v ...interface{}) - Panicf(format string, v ...interface{}) - Panicln(v ...interface{}) - Print(v ...interface{}) - Printf(format string, v ...interface{}) - Println(v ...interface{}) -} - -// Config is used to specify how a file must be tailed. -type Config struct { - // File-specifc - Location *SeekInfo // Tail from this location. If nil, start at the beginning of the file - ReOpen bool // Reopen recreated files (tail -F) - MustExist bool // Fail early if the file does not exist - Poll bool // Poll for file changes instead of using the default inotify - Pipe bool // The file is a named pipe (mkfifo) - - // Generic IO - Follow bool // Continue looking for new lines (tail -f) - MaxLineSize int // If non-zero, split longer lines into multiple lines - - // Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function) - RateLimiter *ratelimiter.LeakyBucket - - // Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger. 
- // To disable logging, set it to tail.DiscardingLogger - Logger logger -} - -type Tail struct { - Filename string // The filename - Lines chan *Line // A consumable channel of *Line - Config // Tail.Configuration - - file *os.File - reader *bufio.Reader - lineNum int - - watcher watch.FileWatcher - changes *watch.FileChanges - - tomb.Tomb // provides: Done, Kill, Dying - - lk sync.Mutex -} - -var ( - // DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil - DefaultLogger = log.New(os.Stderr, "", log.LstdFlags) - // DiscardingLogger can be used to disable logging output - DiscardingLogger = log.New(ioutil.Discard, "", 0) -) - -// TailFile begins tailing the file. And returns a pointer to a Tail struct -// and an error. An output stream is made available via the Tail.Lines -// channel (e.g. to be looped and printed). To handle errors during tailing, -// after finishing reading from the Lines channel, invoke the `Wait` or `Err` -// method on the returned *Tail. -func TailFile(filename string, config Config) (*Tail, error) { - if config.ReOpen && !config.Follow { - util.Fatal("cannot set ReOpen without Follow.") - } - - t := &Tail{ - Filename: filename, - Lines: make(chan *Line), - Config: config, - } - - // when Logger was not specified in config, use default logger - if t.Logger == nil { - t.Logger = DefaultLogger - } - - if t.Poll { - t.watcher = watch.NewPollingFileWatcher(filename) - } else { - t.watcher = watch.NewInotifyFileWatcher(filename) - } - - if t.MustExist { - var err error - t.file, err = OpenFile(t.Filename) - if err != nil { - return nil, err - } - } - - go t.tailFileSync() - - return t, nil -} - -// Tell returns the file's current position, like stdio's ftell() and an error. -// Beware that this value may not be completely accurate because one line from -// the chan(tail.Lines) may have been read already. -func (tail *Tail) Tell() (offset int64, err error) { - if tail.file == nil { - return - } - offset, err = tail.file.Seek(0, io.SeekCurrent) - if err != nil { - return - } - - tail.lk.Lock() - defer tail.lk.Unlock() - if tail.reader == nil { - return - } - - offset -= int64(tail.reader.Buffered()) - return -} - -// Stop stops the tailing activity. -func (tail *Tail) Stop() error { - tail.Kill(nil) - return tail.Wait() -} - -// StopAtEOF stops tailing as soon as the end of the file is reached. 
The function -// returns an error, -func (tail *Tail) StopAtEOF() error { - tail.Kill(errStopAtEOF) - return tail.Wait() -} - -var errStopAtEOF = errors.New("tail: stop at eof") - -func (tail *Tail) close() { - close(tail.Lines) - tail.closeFile() -} - -func (tail *Tail) closeFile() { - if tail.file != nil { - tail.file.Close() - tail.file = nil - } -} - -func (tail *Tail) reopen() error { - tail.closeFile() - tail.lineNum = 0 - for { - var err error - tail.file, err = OpenFile(tail.Filename) - if err != nil { - if os.IsNotExist(err) { - tail.Logger.Printf("Waiting for %s to appear...", tail.Filename) - if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil { - if err == tomb.ErrDying { - return err - } - return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err) - } - continue - } - return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err) - } - break - } - return nil -} - -func (tail *Tail) readLine() (string, error) { - tail.lk.Lock() - line, err := tail.reader.ReadString('\n') - tail.lk.Unlock() - if err != nil { - // Note ReadString "returns the data read before the error" in - // case of an error, including EOF, so we return it as is. The - // caller is expected to process it if err is EOF. - return line, err - } - - line = strings.TrimRight(line, "\n") - - return line, err -} - -func (tail *Tail) tailFileSync() { - defer tail.Done() - defer tail.close() - - if !tail.MustExist { - // deferred first open. - err := tail.reopen() - if err != nil { - if err != tomb.ErrDying { - tail.Kill(err) - } - return - } - } - - // Seek to requested location on first open of the file. - if tail.Location != nil { - _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence) - if err != nil { - tail.Killf("Seek error on %s: %s", tail.Filename, err) - return - } - } - - tail.openReader() - - // Read line by line. - for { - // do not seek in named pipes - if !tail.Pipe { - // grab the position in case we need to back up in the event of a half-line - if _, err := tail.Tell(); err != nil { - tail.Kill(err) - return - } - } - - line, err := tail.readLine() - - // Process `line` even if err is EOF. - if err == nil { - cooloff := !tail.sendLine(line) - if cooloff { - // Wait a second before seeking till the end of - // file when rate limit is reached. - msg := ("Too much log activity; waiting a second before resuming tailing") - offset, _ := tail.Tell() - tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)} - select { - case <-time.After(time.Second): - case <-tail.Dying(): - return - } - if err := tail.seekEnd(); err != nil { - tail.Kill(err) - return - } - } - } else if err == io.EOF { - if !tail.Follow { - if line != "" { - tail.sendLine(line) - } - return - } - - if tail.Follow && line != "" { - tail.sendLine(line) - if err := tail.seekEnd(); err != nil { - tail.Kill(err) - return - } - } - - // When EOF is reached, wait for more data to become - // available. Wait strategy is based on the `tail.watcher` - // implementation (inotify or polling). - err := tail.waitForChanges() - if err != nil { - if err != ErrStop { - tail.Kill(err) - } - return - } - } else { - // non-EOF error - tail.Killf("Error reading %s: %s", tail.Filename, err) - return - } - - select { - case <-tail.Dying(): - if tail.Err() == errStopAtEOF { - continue - } - return - default: - } - } -} - -// waitForChanges waits until the file has been appended, deleted, -// moved or truncated. 
When moved or deleted - the file will be -// reopened if ReOpen is true. Truncated files are always reopened. -func (tail *Tail) waitForChanges() error { - if tail.changes == nil { - pos, err := tail.file.Seek(0, io.SeekCurrent) - if err != nil { - return err - } - tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos) - if err != nil { - return err - } - } - - select { - case <-tail.changes.Modified: - return nil - case <-tail.changes.Deleted: - tail.changes = nil - if tail.ReOpen { - // XXX: we must not log from a library. - tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename) - if err := tail.reopen(); err != nil { - return err - } - tail.Logger.Printf("Successfully reopened %s", tail.Filename) - tail.openReader() - return nil - } - tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) - return ErrStop - case <-tail.changes.Truncated: - // Always reopen truncated files (Follow is true) - tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) - if err := tail.reopen(); err != nil { - return err - } - tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename) - tail.openReader() - return nil - case <-tail.Dying(): - return ErrStop - } -} - -func (tail *Tail) openReader() { - tail.lk.Lock() - if tail.MaxLineSize > 0 { - // add 2 to account for newline characters - tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) - } else { - tail.reader = bufio.NewReader(tail.file) - } - tail.lk.Unlock() -} - -func (tail *Tail) seekEnd() error { - return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd}) -} - -func (tail *Tail) seekTo(pos SeekInfo) error { - _, err := tail.file.Seek(pos.Offset, pos.Whence) - if err != nil { - return fmt.Errorf("Seek error on %s: %s", tail.Filename, err) - } - // Reset the read buffer whenever the file is re-seek'ed - tail.reader.Reset(tail.file) - return nil -} - -// sendLine sends the line(s) to Lines channel, splitting longer lines -// if necessary. Return false if rate limit is reached. -func (tail *Tail) sendLine(line string) bool { - now := time.Now() - lines := []string{line} - - // Split longer lines - if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize { - lines = util.PartitionString(line, tail.MaxLineSize) - } - - for _, line := range lines { - tail.lineNum++ - offset, _ := tail.Tell() - select { - case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}: - case <-tail.Dying(): - return true - } - } - - if tail.Config.RateLimiter != nil { - ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) - if !ok { - tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.", - tail.Filename) - return false - } - } - - return true -} - -// Cleanup removes inotify watches added by the tail package. This function is -// meant to be invoked from a process's exit handler. Linux kernel may not -// automatically remove inotify watches after the process exits. -// If you plan to re-read a file, don't call Cleanup in between. 
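On the consumer side of the loop above, shutdown is a `Stop` (or `StopAtEOF`) followed by `Cleanup` to release inotify watches, as the comments on the removed functions describe. A minimal sketch, assuming `t` came from `tail.TailFile`:

```go
// shutdown stops a running tail and drops its inotify watches.
func shutdown(t *tail.Tail) error {
	// Stop signals the tailing goroutine and waits for it to exit.
	if err := t.Stop(); err != nil {
		return err
	}
	// Skip Cleanup if this process will re-read the same file later.
	t.Cleanup()
	return nil
}
```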
-func (tail *Tail) Cleanup() { - watch.Cleanup(tail.Filename) -} diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go deleted file mode 100644 index 23e071de..00000000 --- a/vendor/github.com/nxadm/tail/tail_posix.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// +build !windows - -package tail - -import ( - "os" -) - -// Deprecated: this function is only useful internally and, as such, -// it will be removed from the API in a future major release. -// -// OpenFile proxies a os.Open call for a file so it can be correctly tailed -// on POSIX and non-POSIX OSes like MS Windows. -func OpenFile(name string) (file *os.File, err error) { - return os.Open(name) -} diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go deleted file mode 100644 index da0d2f39..00000000 --- a/vendor/github.com/nxadm/tail/tail_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// +build windows - -package tail - -import ( - "os" - - "github.com/nxadm/tail/winfile" -) - -// Deprecated: this function is only useful internally and, as such, -// it will be removed from the API in a future major release. -// -// OpenFile proxies a os.Open call for a file so it can be correctly tailed -// on POSIX and non-POSIX OSes like MS Windows. -func OpenFile(name string) (file *os.File, err error) { - return winfile.OpenFile(name, os.O_RDONLY, 0) -} diff --git a/vendor/github.com/nxadm/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go deleted file mode 100644 index b64caa21..00000000 --- a/vendor/github.com/nxadm/tail/util/util.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// Copyright (c) 2015 HPE Software Inc. All rights reserved. -// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. - -package util - -import ( - "fmt" - "log" - "os" - "runtime/debug" -) - -type Logger struct { - *log.Logger -} - -var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} - -// fatal is like panic except it displays only the current goroutine's stack. -func Fatal(format string, v ...interface{}) { - // https://github.com/nxadm/log/blob/master/log.go#L45 - LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) - os.Exit(1) -} - -// partitionString partitions the string into chunks of given size, -// with the last chunk of variable size. 
-func PartitionString(s string, chunkSize int) []string { - if chunkSize <= 0 { - panic("invalid chunkSize") - } - length := len(s) - chunks := 1 + length/chunkSize - start := 0 - end := chunkSize - parts := make([]string, 0, chunks) - for { - if end > length { - end = length - } - parts = append(parts, s[start:end]) - if end == length { - break - } - start, end = end, end+chunkSize - } - return parts -} diff --git a/vendor/github.com/nxadm/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go deleted file mode 100644 index 5b65f42a..00000000 --- a/vendor/github.com/nxadm/tail/watch/filechanges.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -package watch - -type FileChanges struct { - Modified chan bool // Channel to get notified of modifications - Truncated chan bool // Channel to get notified of truncations - Deleted chan bool // Channel to get notified of deletions/renames -} - -func NewFileChanges() *FileChanges { - return &FileChanges{ - make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} -} - -func (fc *FileChanges) NotifyModified() { - sendOnlyIfEmpty(fc.Modified) -} - -func (fc *FileChanges) NotifyTruncated() { - sendOnlyIfEmpty(fc.Truncated) -} - -func (fc *FileChanges) NotifyDeleted() { - sendOnlyIfEmpty(fc.Deleted) -} - -// sendOnlyIfEmpty sends on a bool channel only if the channel has no -// backlog to be read by other goroutines. This concurrency pattern -// can be used to notify other goroutines if and only if they are -// looking for it (i.e., subsequent notifications can be compressed -// into one). -func sendOnlyIfEmpty(ch chan bool) { - select { - case ch <- true: - default: - } -} diff --git a/vendor/github.com/nxadm/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go deleted file mode 100644 index cbd11ad8..00000000 --- a/vendor/github.com/nxadm/tail/watch/inotify.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// Copyright (c) 2015 HPE Software Inc. All rights reserved. -// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. - -package watch - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/nxadm/tail/util" - - "github.com/fsnotify/fsnotify" - "gopkg.in/tomb.v1" -) - -// InotifyFileWatcher uses inotify to monitor file changes. -type InotifyFileWatcher struct { - Filename string - Size int64 -} - -func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { - fw := &InotifyFileWatcher{filepath.Clean(filename), 0} - return fw -} - -func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { - err := WatchCreate(fw.Filename) - if err != nil { - return err - } - defer RemoveWatchCreate(fw.Filename) - - // Do a real check now as the file might have been created before - // calling `WatchFlags` above. - if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { - // file exists, or stat returned an error. 
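The FileChanges helper deleted above coalesces notifications by doing a non-blocking send on capacity-1 channels, so a burst of events leaves at most one pending notification. A self-contained sketch of that pattern (generic names, not the vendored API):

```go
package main

import "fmt"

// notify performs a non-blocking send: if a notification is already
// pending, the new one is dropped rather than blocking the sender.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	modified := make(chan struct{}, 1)

	// A burst of events...
	for i := 0; i < 5; i++ {
		notify(modified)
	}

	// ...collapses into a single pending notification.
	fmt.Println("pending notifications:", len(modified)) // 1
}
```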
- return err - } - - events := Events(fw.Filename) - - for { - select { - case evt, ok := <-events: - if !ok { - return fmt.Errorf("inotify watcher has been closed") - } - evtName, err := filepath.Abs(evt.Name) - if err != nil { - return err - } - fwFilename, err := filepath.Abs(fw.Filename) - if err != nil { - return err - } - if evtName == fwFilename { - return nil - } - case <-t.Dying(): - return tomb.ErrDying - } - } - panic("unreachable") -} - -func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { - err := Watch(fw.Filename) - if err != nil { - return nil, err - } - - changes := NewFileChanges() - fw.Size = pos - - go func() { - - events := Events(fw.Filename) - - for { - prevSize := fw.Size - - var evt fsnotify.Event - var ok bool - - select { - case evt, ok = <-events: - if !ok { - RemoveWatch(fw.Filename) - return - } - case <-t.Dying(): - RemoveWatch(fw.Filename) - return - } - - switch { - case evt.Op&fsnotify.Remove == fsnotify.Remove: - fallthrough - - case evt.Op&fsnotify.Rename == fsnotify.Rename: - RemoveWatch(fw.Filename) - changes.NotifyDeleted() - return - - //With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod) - case evt.Op&fsnotify.Chmod == fsnotify.Chmod: - fallthrough - - case evt.Op&fsnotify.Write == fsnotify.Write: - fi, err := os.Stat(fw.Filename) - if err != nil { - if os.IsNotExist(err) { - RemoveWatch(fw.Filename) - changes.NotifyDeleted() - return - } - // XXX: report this error back to the user - util.Fatal("Failed to stat file %v: %v", fw.Filename, err) - } - fw.Size = fi.Size() - - if prevSize > 0 && prevSize > fw.Size { - changes.NotifyTruncated() - } else { - changes.NotifyModified() - } - prevSize = fw.Size - } - } - }() - - return changes, nil -} diff --git a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go deleted file mode 100644 index cb9572a0..00000000 --- a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// Copyright (c) 2015 HPE Software Inc. All rights reserved. -// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
- -package watch - -import ( - "log" - "os" - "path/filepath" - "sync" - "syscall" - - "github.com/nxadm/tail/util" - - "github.com/fsnotify/fsnotify" -) - -type InotifyTracker struct { - mux sync.Mutex - watcher *fsnotify.Watcher - chans map[string]chan fsnotify.Event - done map[string]chan bool - watchNums map[string]int - watch chan *watchInfo - remove chan *watchInfo - error chan error -} - -type watchInfo struct { - op fsnotify.Op - fname string -} - -func (this *watchInfo) isCreate() bool { - return this.op == fsnotify.Create -} - -var ( - // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used - shared *InotifyTracker - - // these are used to ensure the shared InotifyTracker is run exactly once - once = sync.Once{} - goRun = func() { - shared = &InotifyTracker{ - mux: sync.Mutex{}, - chans: make(map[string]chan fsnotify.Event), - done: make(map[string]chan bool), - watchNums: make(map[string]int), - watch: make(chan *watchInfo), - remove: make(chan *watchInfo), - error: make(chan error), - } - go shared.run() - } - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -// Watch signals the run goroutine to begin watching the input filename -func Watch(fname string) error { - return watch(&watchInfo{ - fname: fname, - }) -} - -// Watch create signals the run goroutine to begin watching the input filename -// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate -func WatchCreate(fname string) error { - return watch(&watchInfo{ - op: fsnotify.Create, - fname: fname, - }) -} - -func watch(winfo *watchInfo) error { - // start running the shared InotifyTracker if not already running - once.Do(goRun) - - winfo.fname = filepath.Clean(winfo.fname) - shared.watch <- winfo - return <-shared.error -} - -// RemoveWatch signals the run goroutine to remove the watch for the input filename -func RemoveWatch(fname string) error { - return remove(&watchInfo{ - fname: fname, - }) -} - -// RemoveWatch create signals the run goroutine to remove the watch for the input filename -func RemoveWatchCreate(fname string) error { - return remove(&watchInfo{ - op: fsnotify.Create, - fname: fname, - }) -} - -func remove(winfo *watchInfo) error { - // start running the shared InotifyTracker if not already running - once.Do(goRun) - - winfo.fname = filepath.Clean(winfo.fname) - shared.mux.Lock() - done := shared.done[winfo.fname] - if done != nil { - delete(shared.done, winfo.fname) - close(done) - } - shared.mux.Unlock() - - shared.remove <- winfo - return <-shared.error -} - -// Events returns a channel to which FileEvents corresponding to the input filename -// will be sent. This channel will be closed when removeWatch is called on this -// filename. -func Events(fname string) <-chan fsnotify.Event { - shared.mux.Lock() - defer shared.mux.Unlock() - - return shared.chans[fname] -} - -// Cleanup removes the watch for the input filename if necessary. -func Cleanup(fname string) error { - return RemoveWatch(fname) -} - -// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating -// a new Watcher if the previous Watcher was closed. -func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { - shared.mux.Lock() - defer shared.mux.Unlock() - - if shared.chans[winfo.fname] == nil { - shared.chans[winfo.fname] = make(chan fsnotify.Event) - } - if shared.done[winfo.fname] == nil { - shared.done[winfo.fname] = make(chan bool) - } - - fname := winfo.fname - if winfo.isCreate() { - // Watch for new files to be created in the parent directory. 
- fname = filepath.Dir(fname) - } - - var err error - // already in inotify watch - if shared.watchNums[fname] == 0 { - err = shared.watcher.Add(fname) - } - if err == nil { - shared.watchNums[fname]++ - } - return err -} - -// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the -// corresponding events channel. -func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error { - shared.mux.Lock() - - ch := shared.chans[winfo.fname] - if ch != nil { - delete(shared.chans, winfo.fname) - close(ch) - } - - fname := winfo.fname - if winfo.isCreate() { - // Watch for new files to be created in the parent directory. - fname = filepath.Dir(fname) - } - shared.watchNums[fname]-- - watchNum := shared.watchNums[fname] - if watchNum == 0 { - delete(shared.watchNums, fname) - } - shared.mux.Unlock() - - var err error - // If we were the last ones to watch this file, unsubscribe from inotify. - // This needs to happen after releasing the lock because fsnotify waits - // synchronously for the kernel to acknowledge the removal of the watch - // for this file, which causes us to deadlock if we still held the lock. - if watchNum == 0 { - err = shared.watcher.Remove(fname) - } - - return err -} - -// sendEvent sends the input event to the appropriate Tail. -func (shared *InotifyTracker) sendEvent(event fsnotify.Event) { - name := filepath.Clean(event.Name) - - shared.mux.Lock() - ch := shared.chans[name] - done := shared.done[name] - shared.mux.Unlock() - - if ch != nil && done != nil { - select { - case ch <- event: - case <-done: - } - } -} - -// run starts the goroutine in which the shared struct reads events from its -// Watcher's Event channel and sends the events to the appropriate Tail. -func (shared *InotifyTracker) run() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - util.Fatal("failed to create Watcher") - } - shared.watcher = watcher - - for { - select { - case winfo := <-shared.watch: - shared.error <- shared.addWatch(winfo) - - case winfo := <-shared.remove: - shared.error <- shared.removeWatch(winfo) - - case event, open := <-shared.watcher.Events: - if !open { - return - } - shared.sendEvent(event) - - case err, open := <-shared.watcher.Errors: - if !open { - return - } else if err != nil { - sysErr, ok := err.(*os.SyscallError) - if !ok || sysErr.Err != syscall.EINTR { - logger.Printf("Error in Watcher Error channel: %s", err) - } - } - } - } -} diff --git a/vendor/github.com/nxadm/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go deleted file mode 100644 index 74e10aa4..00000000 --- a/vendor/github.com/nxadm/tail/watch/polling.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// Copyright (c) 2015 HPE Software Inc. All rights reserved. -// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. - -package watch - -import ( - "os" - "runtime" - "time" - - "github.com/nxadm/tail/util" - "gopkg.in/tomb.v1" -) - -// PollingFileWatcher polls the file for changes. 
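The InotifyTracker removed above is built on a pattern worth noting: a package-level singleton started lazily with `sync.Once`, whose single dispatcher goroutine owns all shared state while callers submit work over a channel and read back a result. A minimal, self-contained sketch of that pattern (hypothetical names, not the vendored API):

```go
package main

import (
	"fmt"
	"sync"
)

type request struct {
	name  string
	reply chan error
}

var (
	once     sync.Once
	requests chan request
)

// submit lazily starts the dispatcher exactly once, then hands it a
// request and waits for the reply. Only the dispatcher goroutine
// touches the watched map, so no mutex is needed for it.
func submit(name string) error {
	once.Do(func() {
		requests = make(chan request)
		go func() {
			watched := map[string]bool{}
			for req := range requests {
				if watched[req.name] {
					req.reply <- fmt.Errorf("%s is already watched", req.name)
					continue
				}
				watched[req.name] = true
				req.reply <- nil
			}
		}()
	})

	reply := make(chan error)
	requests <- request{name: name, reply: reply}
	return <-reply
}

func main() {
	fmt.Println(submit("a.log")) // <nil>
	fmt.Println(submit("a.log")) // a.log is already watched
}
```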
-type PollingFileWatcher struct { - Filename string - Size int64 -} - -func NewPollingFileWatcher(filename string) *PollingFileWatcher { - fw := &PollingFileWatcher{filename, 0} - return fw -} - -var POLL_DURATION time.Duration - -func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { - for { - if _, err := os.Stat(fw.Filename); err == nil { - return nil - } else if !os.IsNotExist(err) { - return err - } - select { - case <-time.After(POLL_DURATION): - continue - case <-t.Dying(): - return tomb.ErrDying - } - } - panic("unreachable") -} - -func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { - origFi, err := os.Stat(fw.Filename) - if err != nil { - return nil, err - } - - changes := NewFileChanges() - var prevModTime time.Time - - // XXX: use tomb.Tomb to cleanly manage these goroutines. replace - // the fatal (below) with tomb's Kill. - - fw.Size = pos - - go func() { - prevSize := fw.Size - for { - select { - case <-t.Dying(): - return - default: - } - - time.Sleep(POLL_DURATION) - fi, err := os.Stat(fw.Filename) - if err != nil { - // Windows cannot delete a file if a handle is still open (tail keeps one open) - // so it gives access denied to anything trying to read it until all handles are released. - if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { - // File does not exist (has been deleted). - changes.NotifyDeleted() - return - } - - // XXX: report this error back to the user - util.Fatal("Failed to stat file %v: %v", fw.Filename, err) - } - - // File got moved/renamed? - if !os.SameFile(origFi, fi) { - changes.NotifyDeleted() - return - } - - // File got truncated? - fw.Size = fi.Size() - if prevSize > 0 && prevSize > fw.Size { - changes.NotifyTruncated() - prevSize = fw.Size - continue - } - // File got bigger? - if prevSize > 0 && prevSize < fw.Size { - changes.NotifyModified() - prevSize = fw.Size - continue - } - prevSize = fw.Size - - // File was appended to (changed)? - modTime := fi.ModTime() - if modTime != prevModTime { - prevModTime = modTime - changes.NotifyModified() - } - } - }() - - return changes, nil -} - -func init() { - POLL_DURATION = 250 * time.Millisecond -} diff --git a/vendor/github.com/nxadm/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go deleted file mode 100644 index 2b511280..00000000 --- a/vendor/github.com/nxadm/tail/watch/watch.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// Copyright (c) 2015 HPE Software Inc. All rights reserved. -// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. - -package watch - -import "gopkg.in/tomb.v1" - -// FileWatcher monitors file-level events. -type FileWatcher interface { - // BlockUntilExists blocks until the file comes into existence. - BlockUntilExists(*tomb.Tomb) error - - // ChangeEvents reports on changes to a file, be it modification, - // deletion, renames or truncations. Returned FileChanges group of - // channels will be closed, thus become unusable, after a deletion - // or truncation event. - // In order to properly report truncations, ChangeEvents requires - // the caller to pass their current offset in the file. 
- ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) -} diff --git a/vendor/github.com/nxadm/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go deleted file mode 100644 index 4562ac7c..00000000 --- a/vendor/github.com/nxadm/tail/winfile/winfile.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail -// +build windows - -package winfile - -import ( - "os" - "syscall" - "unsafe" -) - -// issue also described here -//https://codereview.appspot.com/8203043/ - -// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218 -func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) { - if len(path) == 0 { - return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND - } - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return syscall.InvalidHandle, err - } - var access uint32 - switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { - case syscall.O_RDONLY: - access = syscall.GENERIC_READ - case syscall.O_WRONLY: - access = syscall.GENERIC_WRITE - case syscall.O_RDWR: - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - } - if mode&syscall.O_CREAT != 0 { - access |= syscall.GENERIC_WRITE - } - if mode&syscall.O_APPEND != 0 { - access &^= syscall.GENERIC_WRITE - access |= syscall.FILE_APPEND_DATA - } - sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) - var sa *syscall.SecurityAttributes - if mode&syscall.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): - createmode = syscall.CREATE_NEW - case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): - createmode = syscall.CREATE_ALWAYS - case mode&syscall.O_CREAT == syscall.O_CREAT: - createmode = syscall.OPEN_ALWAYS - case mode&syscall.O_TRUNC == syscall.O_TRUNC: - createmode = syscall.TRUNCATE_EXISTING - default: - createmode = syscall.OPEN_EXISTING - } - h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) - return h, e -} - -// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211 -func makeInheritSa() *syscall.SecurityAttributes { - var sa syscall.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133 -func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) { - r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61 -func syscallMode(i os.FileMode) (o uint32) { - o |= uint32(i.Perm()) - if i&os.ModeSetuid != 0 { - o |= syscall.S_ISUID - } - if i&os.ModeSetgid != 0 { - o |= syscall.S_ISGID - } - if i&os.ModeSticky != 0 { - o |= syscall.S_ISVTX - } - // No mapping for Go's ModeTemporary (plan9 only). 
- return -} diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml deleted file mode 100644 index ea0966d5..00000000 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go -go: - - tip - - 1.16.x - - 1.15.x - -cache: - directories: - - $GOPATH/pkg/mod - -# allow internal package imports, necessary for forked repositories -go_import_path: github.com/onsi/ginkgo - -install: - - GO111MODULE="off" go get -v -t ./... - - GO111MODULE="off" go get golang.org/x/tools/cmd/cover - - GO111MODULE="off" go get github.com/onsi/gomega - - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo - - export PATH=$GOPATH/bin:$PATH - -script: - - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum - - go vet - - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md deleted file mode 100644 index a25ca5e0..00000000 --- a/vendor/github.com/onsi/ginkgo/README.md +++ /dev/null @@ -1,169 +0,0 @@ -![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png) - -[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) - -Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! - -If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). - -# Ginkgo 2.0 Release Candidate is available! - -An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion. - -As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711). - -Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide. - -## TLDR -Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests. -It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library. 
- -```go -Describe("the strings package", func() { - Context("strings.Contains()", func() { - When("the string contains the substring in the middle", func() { - It("returns `true`", func() { - Expect(strings.Contains("Ginkgo is awesome", "is")).To(BeTrue()) - }) - }) - }) -}) -``` - -## Feature List - -- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite) - -- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style: - - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) - - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown - - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions - - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). - - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. - -- A comprehensive test runner that lets you: - - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs) - - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line - - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order. - - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs) - -- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: - - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime - - `ginkgo -cover` runs your tests using Go's code coverage tool - - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package - - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression - - `ginkgo -r` runs all tests suites under the current directory - - `ginkgo -v` prints out identifying information for each tests just before it runs - - And much more: run `ginkgo help` for details! - - The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test` - -- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop! - -- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests) - -- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. 
- -- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`. - -- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. - -- [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`. - -- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details. - -- A modular architecture that lets you easily: - - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter). - - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo - -## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library - -Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/) - -## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework - -Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org) - -## Getting Started - -You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed. - -### Global installation -To install the Ginkgo command line interface: -```bash -go get -u github.com/onsi/ginkgo/ginkgo -``` -Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information. - -### Go module ["tools package"](https://github.com/golang/go/issues/25922): -Create (or update) a file called `tools/tools.go` with the following contents: -```go -// +build tools - -package tools - -import ( - _ "github.com/onsi/ginkgo/ginkgo" -) - -// This file imports packages that are used when running go generate, or used -// during the development process but not otherwise depended on by built code. -``` -The Ginkgo command can then be run via `go run github.com/onsi/ginkgo/ginkgo`. -This approach allows the version of Ginkgo to be maintained under source control for reproducible results, -and is well suited to automated test pipelines. - -### Bootstrapping -```bash -cd path/to/package/you/want/to/test - -ginkgo bootstrap # set up a new ginkgo suite -ginkgo generate # will create a sample test file. edit this file and add your tests then... - -go test # to run your tests - -ginkgo # also runs your tests - -``` - -## I'm new to Go: What are my testing options? - -Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set. - -With that said, it's great to know what your options are :) - -### What Go gives you out of the box - -Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. 
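To make the contrast concrete: the same assertion as the Ginkgo/Gomega snippet earlier in this README, written with nothing but the standard `testing` package, looks like the following (an illustrative sketch, not part of the removed file):

```go
package strings_test

import (
	"strings"
	"testing"
)

// Plain XUnit-style test: explicit conditional, no matcher library.
func TestContains(t *testing.T) {
	if !strings.Contains("Ginkgo is awesome", "is") {
		t.Fatalf(`strings.Contains("Ginkgo is awesome", "is") = false, want true`)
	}
}
```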
- -### Matcher libraries for Go's XUnit style tests - -A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction: - -- [testify](https://github.com/stretchr/testify) -- [gocheck](https://labix.org/gocheck) - -You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) - -### BDD style testing frameworks - -There are a handful of BDD-style testing frameworks written for Go. Here are a few: - -- [Ginkgo](https://github.com/onsi/ginkgo) ;) -- [GoConvey](https://github.com/smartystreets/goconvey) -- [Goblin](https://github.com/franela/goblin) -- [Mao](https://github.com/azer/mao) -- [Zen](https://github.com/pranavraja/zen) - -Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries. - -Go explore! - -## License - -Ginkgo is MIT-Licensed - -## Contributing - -See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go deleted file mode 100644 index 3130c778..00000000 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Ginkgo accepts a number of configuration options. - -These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) - -You can also learn more via - - ginkgo help - -or (I kid you not): - - go test -asdf -*/ -package config - -import ( - "flag" - "time" - - "fmt" -) - -const VERSION = "1.16.5" - -type GinkgoConfigType struct { - RandomSeed int64 - RandomizeAllSpecs bool - RegexScansFilePath bool - FocusStrings []string - SkipStrings []string - SkipMeasurements bool - FailOnPending bool - FailFast bool - FlakeAttempts int - EmitSpecProgress bool - DryRun bool - DebugParallel bool - - ParallelNode int - ParallelTotal int - SyncHost string - StreamHost string -} - -var GinkgoConfig = GinkgoConfigType{} - -type DefaultReporterConfigType struct { - NoColor bool - SlowSpecThreshold float64 - NoisyPendings bool - NoisySkippings bool - Succinct bool - Verbose bool - FullTrace bool - ReportPassed bool - ReportFile string -} - -var DefaultReporterConfig = DefaultReporterConfigType{} - -func processPrefix(prefix string) string { - if prefix != "" { - prefix += "." - } - return prefix -} - -type flagFunc func(string) - -func (f flagFunc) String() string { return "" } -func (f flagFunc) Set(s string) error { f(s); return nil } - -func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { - prefix = processPrefix(prefix) - flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") - flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When groups.") - flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") - flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") - flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") - - flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") - - flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.") - flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.") - - flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") - - flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.") - - flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") - - flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.") - - if includeParallelFlags { - flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") - flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. 
For running specs in parallel.") - flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.") - flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.") - } - - flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") - flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.") - flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") - flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.") - flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") - flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") - flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") - flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.") - flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.") - -} - -func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { - prefix = processPrefix(prefix) - result := make([]string, 0) - - if ginkgo.RandomSeed > 0 { - result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed)) - } - - if ginkgo.RandomizeAllSpecs { - result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix)) - } - - if ginkgo.SkipMeasurements { - result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix)) - } - - if ginkgo.FailOnPending { - result = append(result, fmt.Sprintf("--%sfailOnPending", prefix)) - } - - if ginkgo.FailFast { - result = append(result, fmt.Sprintf("--%sfailFast", prefix)) - } - - if ginkgo.DryRun { - result = append(result, fmt.Sprintf("--%sdryRun", prefix)) - } - - for _, s := range ginkgo.FocusStrings { - result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s)) - } - - for _, s := range ginkgo.SkipStrings { - result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s)) - } - - if ginkgo.FlakeAttempts > 1 { - result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts)) - } - - if ginkgo.EmitSpecProgress { - result = append(result, fmt.Sprintf("--%sprogress", prefix)) - } - - if ginkgo.DebugParallel { - result = append(result, fmt.Sprintf("--%sdebug", prefix)) - } - - if ginkgo.ParallelNode != 0 { - result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) - } - - if ginkgo.ParallelTotal != 0 { - result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal)) - } - - if ginkgo.StreamHost != "" { - result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost)) - } - - if ginkgo.SyncHost != "" { - result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", 
prefix, ginkgo.SyncHost)) - } - - if ginkgo.RegexScansFilePath { - result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix)) - } - - if reporter.NoColor { - result = append(result, fmt.Sprintf("--%snoColor", prefix)) - } - - if reporter.SlowSpecThreshold > 0 { - result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) - } - - if !reporter.NoisyPendings { - result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) - } - - if !reporter.NoisySkippings { - result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix)) - } - - if reporter.Verbose { - result = append(result, fmt.Sprintf("--%sv", prefix)) - } - - if reporter.Succinct { - result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) - } - - if reporter.FullTrace { - result = append(result, fmt.Sprintf("--%strace", prefix)) - } - - if reporter.ReportPassed { - result = append(result, fmt.Sprintf("--%sreportPassed", prefix)) - } - - if reporter.ReportFile != "" { - result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile)) - } - - return result -} - -// flagFocus implements the -focus flag. -func flagFocus(arg string) { - if arg != "" { - GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg) - } -} - -// flagSkip implements the -skip flag. -func flagSkip(arg string) { - if arg != "" { - GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg) - } -} diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table.go b/vendor/github.com/onsi/ginkgo/extensions/table/table.go deleted file mode 100644 index 4b002780..00000000 --- a/vendor/github.com/onsi/ginkgo/extensions/table/table.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - -Table provides a simple DSL for Ginkgo-native Table-Driven Tests - -The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests - -*/ - -package table - -import ( - "fmt" - "reflect" - - "github.com/onsi/ginkgo/internal/codelocation" - "github.com/onsi/ginkgo/internal/global" - "github.com/onsi/ginkgo/types" -) - -/* -DescribeTable describes a table-driven test. - -For example: - - DescribeTable("a simple table", - func(x int, y int, expected bool) { - Ω(x > y).Should(Equal(expected)) - }, - Entry("x > y", 1, 0, true), - Entry("x == y", 0, 0, false), - Entry("x < y", 0, 1, false), - ) - -The first argument to `DescribeTable` is a string description. -The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It. -The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors. - -The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function. - -Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`. - -It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run). - -Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable. - -A description function can be passed to Entry in place of the description. 
The function is then fed with the entry parameters to generate the description of the It corresponding to that particular Entry. - -For example: - - describe := func(desc string) func(int, int, bool) string { - return func(x, y int, expected bool) string { - return fmt.Sprintf("%s x=%d y=%d expected:%t", desc, x, y, expected) - } - } - - DescribeTable("a simple table", - func(x int, y int, expected bool) { - Ω(x > y).Should(Equal(expected)) - }, - Entry(describe("x > y"), 1, 0, true), - Entry(describe("x == y"), 0, 0, false), - Entry(describe("x < y"), 0, 1, false), - ) -*/ -func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, types.FlagTypeNone) - return true -} - -/* -You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. -*/ -func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, types.FlagTypeFocused) - return true -} - -/* -You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. -*/ -func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, types.FlagTypePending) - return true -} - -/* -You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. -*/ -func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, types.FlagTypePending) - return true -} - -func describeTable(description string, itBody interface{}, entries []TableEntry, flag types.FlagType) { - itBodyValue := reflect.ValueOf(itBody) - if itBodyValue.Kind() != reflect.Func { - panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody)) - } - - global.Suite.PushContainerNode( - description, - func() { - for _, entry := range entries { - entry.generateIt(itBodyValue) - } - }, - flag, - codelocation.New(2), - ) -} diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go deleted file mode 100644 index 4d9c237a..00000000 --- a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go +++ /dev/null @@ -1,129 +0,0 @@ -package table - -import ( - "fmt" - "reflect" - - "github.com/onsi/ginkgo/internal/codelocation" - "github.com/onsi/ginkgo/internal/global" - "github.com/onsi/ginkgo/types" -) - -/* -TableEntry represents an entry in a table test. You generally use the `Entry` constructor. 
-*/ -type TableEntry struct { - Description interface{} - Parameters []interface{} - Pending bool - Focused bool - codeLocation types.CodeLocation -} - -func (t TableEntry) generateIt(itBody reflect.Value) { - var description string - descriptionValue := reflect.ValueOf(t.Description) - switch descriptionValue.Kind() { - case reflect.String: - description = descriptionValue.String() - case reflect.Func: - values := castParameters(descriptionValue, t.Parameters) - res := descriptionValue.Call(values) - if len(res) != 1 { - panic(fmt.Sprintf("The describe function should return only a value, returned %d", len(res))) - } - if res[0].Kind() != reflect.String { - panic(fmt.Sprintf("The describe function should return a string, returned %#v", res[0])) - } - description = res[0].String() - default: - panic(fmt.Sprintf("Description can either be a string or a function, got %#v", descriptionValue)) - } - - if t.Pending { - global.Suite.PushItNode(description, func() {}, types.FlagTypePending, t.codeLocation, 0) - return - } - - values := castParameters(itBody, t.Parameters) - body := func() { - itBody.Call(values) - } - - if t.Focused { - global.Suite.PushItNode(description, body, types.FlagTypeFocused, t.codeLocation, global.DefaultTimeout) - } else { - global.Suite.PushItNode(description, body, types.FlagTypeNone, t.codeLocation, global.DefaultTimeout) - } -} - -func castParameters(function reflect.Value, parameters []interface{}) []reflect.Value { - res := make([]reflect.Value, len(parameters)) - funcType := function.Type() - for i, param := range parameters { - if param == nil { - inType := funcType.In(i) - res[i] = reflect.Zero(inType) - } else { - res[i] = reflect.ValueOf(param) - } - } - return res -} - -/* -Entry constructs a TableEntry. - -The first argument is a required description (this becomes the content of the generated Ginkgo `It`). -Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`. - -Each Entry ends up generating an individual Ginkgo It. -*/ -func Entry(description interface{}, parameters ...interface{}) TableEntry { - return TableEntry{ - Description: description, - Parameters: parameters, - Pending: false, - Focused: false, - codeLocation: codelocation.New(1), - } -} - -/* -You can focus a particular entry with FEntry. This is equivalent to FIt. -*/ -func FEntry(description interface{}, parameters ...interface{}) TableEntry { - return TableEntry{ - Description: description, - Parameters: parameters, - Pending: false, - Focused: true, - codeLocation: codelocation.New(1), - } -} - -/* -You can mark a particular entry as pending with PEntry. This is equivalent to PIt. -*/ -func PEntry(description interface{}, parameters ...interface{}) TableEntry { - return TableEntry{ - Description: description, - Parameters: parameters, - Pending: true, - Focused: false, - codeLocation: codelocation.New(1), - } -} - -/* -You can mark a particular entry as pending with XEntry. This is equivalent to XIt. 
-*/ -func XEntry(description interface{}, parameters ...interface{}) TableEntry { - return TableEntry{ - Description: description, - Parameters: parameters, - Pending: true, - Focused: false, - codeLocation: codelocation.New(1), - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go deleted file mode 100644 index ccd7685e..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ /dev/null @@ -1,681 +0,0 @@ -/* -Ginkgo is a BDD-style testing framework for Golang - -The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/ - -Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) - -Ginkgo on Github: http://github.com/onsi/ginkgo - -Ginkgo is MIT-Licensed -*/ -package ginkgo - -import ( - "flag" - "fmt" - "io" - "net/http" - "os" - "reflect" - "strings" - "time" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/internal/codelocation" - "github.com/onsi/ginkgo/internal/global" - "github.com/onsi/ginkgo/internal/remote" - "github.com/onsi/ginkgo/internal/testingtproxy" - "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/reporters/stenographer" - colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" - "github.com/onsi/ginkgo/types" -) - -var deprecationTracker = types.NewDeprecationTracker() - -const GINKGO_VERSION = config.VERSION -const GINKGO_PANIC = ` -Your test failed. -Ginkgo panics to prevent subsequent assertions from running. -Normally Ginkgo rescues this panic so you shouldn't see it. - -But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. -To circumvent this, you should call - - defer GinkgoRecover() - -at the top of the goroutine that caused this panic. -` - -func init() { - config.Flags(flag.CommandLine, "ginkgo", true) - GinkgoWriter = writer.New(os.Stdout) -} - -//GinkgoWriter implements an io.Writer -//When running in verbose mode any writes to GinkgoWriter will be immediately printed -//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen -//only if the current test fails. -var GinkgoWriter io.Writer - -//The interface by which Ginkgo receives *testing.T -type GinkgoTestingT interface { - Fail() -} - -//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is -//useful for seeding your own pseudorandom number generators (PRNGs) to ensure -//consistent executions from run to run, where your tests contain variability (for -//example, when selecting random test data). 
-func GinkgoRandomSeed() int64 { - return config.GinkgoConfig.RandomSeed -} - -//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead -func GinkgoParallelNode() int { - deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1)) - return GinkgoParallelProcess() -} - -//GinkgoParallelProcess returns the parallel process number for the current ginkgo process -//The process number is 1-indexed -func GinkgoParallelProcess() int { - return config.GinkgoConfig.ParallelNode -} - -//Some matcher libraries or legacy codebases require a *testing.T -//GinkgoT implements an interface analogous to *testing.T and can be used if -//the library in question accepts *testing.T through an interface -// -// For example, with testify: -// assert.Equal(GinkgoT(), 123, 123, "they should be equal") -// -// Or with gomock: -// gomock.NewController(GinkgoT()) -// -// GinkgoT() takes an optional offset argument that can be used to get the -// correct line number associated with the failure. -func GinkgoT(optionalOffset ...int) GinkgoTInterface { - offset := 3 - if len(optionalOffset) > 0 { - offset = optionalOffset[0] - } - failedFunc := func() bool { - return CurrentGinkgoTestDescription().Failed - } - nameFunc := func() string { - return CurrentGinkgoTestDescription().FullTestText - } - return testingtproxy.New(GinkgoWriter, Fail, Skip, failedFunc, nameFunc, offset) -} - -//The interface returned by GinkgoT(). This covers most of the methods -//in the testing package's T. -type GinkgoTInterface interface { - Cleanup(func()) - Setenv(key, value string) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Helper() - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Name() string - Parallel() - Skip(args ...interface{}) - SkipNow() - Skipf(format string, args ...interface{}) - Skipped() bool - TempDir() string -} - -//Custom Ginkgo test reporters must implement the Reporter interface. -// -//The custom reporter is passed in a SuiteSummary when the suite begins and ends, -//and a SpecSummary just before a spec begins and just after a spec ends -type Reporter reporters.Reporter - -//Asynchronous specs are given a channel of the Done type. You must close or write to the channel -//to tell Ginkgo that your async test is done. -type Done chan<- interface{} - -//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription -// FullTestText: a concatenation of ComponentTexts and the TestText -// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test -// TestText: the text in the actual It or Measure node -// IsMeasurement: true if the current test is a measurement -// FileName: the name of the file containing the current test -// LineNumber: the line number for the current test -// Failed: if the current test has failed, this will be true (useful in an AfterEach) -type GinkgoTestDescription struct { - FullTestText string - ComponentTexts []string - TestText string - - IsMeasurement bool - - FileName string - LineNumber int - - Failed bool - Duration time.Duration -} - -//CurrentGinkgoTestDescripton returns information about the current running test. 
-func CurrentGinkgoTestDescription() GinkgoTestDescription { - summary, ok := global.Suite.CurrentRunningSpecSummary() - if !ok { - return GinkgoTestDescription{} - } - - subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1] - - return GinkgoTestDescription{ - ComponentTexts: summary.ComponentTexts[1:], - FullTestText: strings.Join(summary.ComponentTexts[1:], " "), - TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1], - IsMeasurement: summary.IsMeasurement, - FileName: subjectCodeLocation.FileName, - LineNumber: subjectCodeLocation.LineNumber, - Failed: summary.HasFailureState(), - Duration: summary.RunTime, - } -} - -//Measurement tests receive a Benchmarker. -// -//You use the Time() function to time how long the passed in body function takes to run -//You use the RecordValue() function to track arbitrary numerical measurements. -//The RecordValueWithPrecision() function can be used alternatively to provide the unit -//and resolution of the numeric measurement. -//The optional info argument is passed to the test reporter and can be used to -// provide the measurement data to a custom reporter with context. -// -//See http://onsi.github.io/ginkgo/#benchmark_tests for more details -type Benchmarker interface { - Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) - RecordValue(name string, value float64, info ...interface{}) - RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) -} - -//RunSpecs is the entry point for the Ginkgo test runner. -//You must call this within a Golang testing TestX(t *testing.T) function. -// -//To bootstrap a test suite you can use the Ginkgo CLI: -// -// ginkgo bootstrap -func RunSpecs(t GinkgoTestingT, description string) bool { - specReporters := []Reporter{buildDefaultReporter()} - if config.DefaultReporterConfig.ReportFile != "" { - reportFile := config.DefaultReporterConfig.ReportFile - specReporters[0] = reporters.NewJUnitReporter(reportFile) - specReporters = append(specReporters, buildDefaultReporter()) - } - return runSpecsWithCustomReporters(t, description, specReporters) -} - -//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace -//RunSpecs() with this method. -func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) - specReporters = append(specReporters, buildDefaultReporter()) - return runSpecsWithCustomReporters(t, description, specReporters) -} - -//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace -//RunSpecs() with this method. 
Note that parallel tests will not work correctly without the default reporter -func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) - return runSpecsWithCustomReporters(t, description, specReporters) -} - -func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - writer := GinkgoWriter.(*writer.Writer) - writer.SetStream(config.DefaultReporterConfig.Verbose) - reporters := make([]reporters.Reporter, len(specReporters)) - for i, reporter := range specReporters { - reporters[i] = reporter - } - passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig) - - if deprecationTracker.DidTrackDeprecations() { - fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport()) - } - - if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { - fmt.Println("PASS | FOCUSED") - os.Exit(types.GINKGO_FOCUS_EXIT_CODE) - } - return passed -} - -func buildDefaultReporter() Reporter { - remoteReportingServer := config.GinkgoConfig.StreamHost - if remoteReportingServer == "" { - stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout()) - return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) - } else { - debugFile := "" - if config.GinkgoConfig.DebugParallel { - debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode) - } - return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile) - } -} - -//Skip notifies Ginkgo that the current spec was skipped. -func Skip(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - - global.Failer.Skip(message, codelocation.New(skip+1)) - panic(GINKGO_PANIC) -} - -//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) -func Fail(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - - global.Failer.Fail(message, codelocation.New(skip+1)) - panic(GINKGO_PANIC) -} - -//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` -//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that -//calls out to Gomega -// -//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent -//further assertions from running. This panic must be recovered. Ginkgo does this for you -//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...) -// -//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no -//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. -func GinkgoRecover() { - e := recover() - if e != nil { - global.Failer.Panic(codelocation.New(1), e) - } -} - -//Describe blocks allow you to organize your specs. A Describe block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. 
The difference is purely semantic -- you typically Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts and Whens. -func Describe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FDescribe -func FDescribe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PDescribe -func PDescribe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XDescribe -func XDescribe(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//Context blocks allow you to organize your specs. A Context block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts and Whens. -func Context(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FContext -func FContext(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PContext -func PContext(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XContext -func XContext(text string, body func()) bool { - global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//When blocks allow you to organize your specs. A When block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts and Whens. 
-func When(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FWhen -func FWhen(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PWhen -func PWhen(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XWhen -func XWhen(text string, body func()) bool { - global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks -//within an It block. -// -//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a -//function that accepts a Done channel. When you do this, you can also provide an optional timeout. -func It(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can focus individual Its using FIt -func FIt(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can mark Its as pending using PIt -func PIt(text string, _ ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Its as pending using XIt -func XIt(text string, _ ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//Specify blocks are aliases for It blocks and allow for more natural wording in situations -//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks -//which apply to It blocks. -func Specify(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can focus individual Specifys using FSpecify -func FSpecify(text string, body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can mark Specifys as pending using PSpecify -func PSpecify(text string, is ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Specifys as pending using XSpecify -func XSpecify(text string, is ...interface{}) bool { - global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//By allows you to better document large Its. -// -//Generally you should try to keep your Its short and to the point. This is not always possible, however, -//especially in the context of integration tests that capture a particular workflow. 
-// -//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...) -//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. -func By(text string, callbacks ...func()) { - preamble := "\x1b[1mSTEP\x1b[0m" - if config.DefaultReporterConfig.NoColor { - preamble = "STEP" - } - fmt.Fprintln(GinkgoWriter, preamble+": "+text) - if len(callbacks) == 1 { - callbacks[0]() - } - if len(callbacks) > 1 { - panic("just one callback per By, please") - } -} - -//Measure blocks run the passed in body function repeatedly (determined by the samples argument) -//and accumulate metrics provided to the Benchmarker by the body function. -// -//The body function must have the signature: -// func(b Benchmarker) -func Measure(text string, body interface{}, samples int) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) - return true -} - -//You can focus individual Measures using FMeasure -func FMeasure(text string, body interface{}, samples int) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) - return true -} - -//You can mark Measurements as pending using PMeasure -func PMeasure(text string, _ ...interface{}) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Measurements as pending using XMeasure -func XMeasure(text string, _ ...interface{}) bool { - deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) - global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each -//parallel node process will call BeforeSuite. -// -//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel -// -//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. -func BeforeSuite(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed. -//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting. -// -//When running in parallel, each parallel node process will call AfterSuite. -// -//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel -// -//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. 
-func AfterSuite(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across -//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that -//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait -//until that node is done before running. -// -//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is -//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1) -//to the second function (on all the other nodes). -// -//The functions have the following signatures. The first function (which only runs on node 1) has the signature: -// -// func() []byte -// -//or, to run asynchronously: -// -// func(done Done) []byte -// -//The byte array returned by the first function is then passed to the second function, which has the signature: -// -// func(data []byte) -// -//or, to run asynchronously: -// -// func(data []byte, done Done) -// -//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes: -// -// var dbClient db.Client -// var dbRunner db.Runner -// -// var _ = SynchronizedBeforeSuite(func() []byte { -// dbRunner = db.NewRunner() -// err := dbRunner.Start() -// Ω(err).ShouldNot(HaveOccurred()) -// return []byte(dbRunner.URL) -// }, func(data []byte) { -// dbClient = db.NewClient() -// err := dbClient.Connect(string(data)) -// Ω(err).ShouldNot(HaveOccurred()) -// }) -func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { - global.Suite.SetSynchronizedBeforeSuiteNode( - node1Body, - allNodesBody, - codelocation.New(1), - parseTimeout(timeout...), - ) - return true -} - -//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up -//external singleton resources shared across nodes when running tests in parallel. -// -//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1 -//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until -//all other nodes are finished. -// -//Both functions have the same signature: either func() or func(done Done) to run asynchronously. -// -//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database -//only after all nodes have finished: -// -// var _ = SynchronizedAfterSuite(func() { -// dbClient.Cleanup() -// }, func() { -// dbRunner.Stop() -// }) -func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { - global.Suite.SetSynchronizedAfterSuiteNode( - allNodesBody, - node1Body, - codelocation.New(1), - parseTimeout(timeout...), - ) - return true -} - -//BeforeEach blocks are run before It blocks. 
When multiple BeforeEach blocks are defined in nested -//Describe and Context blocks the outermost BeforeEach blocks are run first. -// -//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func BeforeEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details, -//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) -// -//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func JustBeforeEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details, -//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) -// -//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func JustAfterEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested -//Describe and Context blocks the innermost AfterEach blocks are run first. -// -//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func AfterEach(body interface{}, timeout ...float64) bool { - validateBodyFunc(body, codelocation.New(1)) - global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -func validateBodyFunc(body interface{}, cl types.CodeLocation) { - t := reflect.TypeOf(body) - if t.Kind() != reflect.Func { - return - } - - if t.NumOut() > 0 { - return - } - - if t.NumIn() == 0 { - return - } - - if t.In(0) == reflect.TypeOf(make(Done)) { - deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) - } -} - -func parseTimeout(timeout ...float64) time.Duration { - if len(timeout) == 0 { - return global.DefaultTimeout - } else { - return time.Duration(timeout[0] * float64(time.Second)) - } -} diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod deleted file mode 100644 index 17114432..00000000 --- a/vendor/github.com/onsi/ginkgo/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/onsi/ginkgo - -go 1.16 - -require ( - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 - github.com/nxadm/tail v1.4.8 - github.com/onsi/gomega v1.10.1 - golang.org/x/sys v0.0.0-20210112080510-489259a85091 - golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e -) - -retract v1.16.3 // git tag accidentally associated with incorrect git commit diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum deleted file mode 100644 index 5c5c3c50..00000000 --- a/vendor/github.com/onsi/ginkgo/go.sum +++ /dev/null @@ -1,86 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go deleted file mode 100644 index aa89d6cb..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go +++ /dev/null @@ -1,48 +0,0 @@ -package codelocation - -import ( - "regexp" - "runtime" - "runtime/debug" - "strings" - - "github.com/onsi/ginkgo/types" -) - -func New(skip int) types.CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip+1) - return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} -} - -// PruneStack removes references to functions that are internal to Ginkgo -// and the Go runtime from a stack string and a certain number of stack entries -// at the beginning of the stack. The stack string has the format -// as returned by runtime/debug.Stack. The leading goroutine information is -// optional and always removed if present. Beware that runtime/debug.Stack -// adds itself as first entry, so typically skip must be >= 1 to remove that -// entry. -func PruneStack(fullStackTrace string, skip int) string { - stack := strings.Split(fullStackTrace, "\n") - // Ensure that the even entries are the method names and the - // the odd entries the source code information. - if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { - // Ignore "goroutine 29 [running]:" line. - stack = stack[1:] - } - // The "+1" is for skipping over the initial entry, which is - // runtime/debug.Stack() itself. - if len(stack) > 2*(skip+1) { - stack = stack[2*(skip+1):] - } - prunedStack := []string{} - re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) - for i := 0; i < len(stack)/2; i++ { - // We filter out based on the source code file name. 
- if !re.Match([]byte(stack[i*2+1])) { - prunedStack = append(prunedStack, stack[i*2]) - prunedStack = append(prunedStack, stack[i*2+1]) - } - } - return strings.Join(prunedStack, "\n") -} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go deleted file mode 100644 index 0737746d..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go +++ /dev/null @@ -1,151 +0,0 @@ -package containernode - -import ( - "math/rand" - "sort" - - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/types" -) - -type subjectOrContainerNode struct { - containerNode *ContainerNode - subjectNode leafnodes.SubjectNode -} - -func (n subjectOrContainerNode) text() string { - if n.containerNode != nil { - return n.containerNode.Text() - } else { - return n.subjectNode.Text() - } -} - -type CollatedNodes struct { - Containers []*ContainerNode - Subject leafnodes.SubjectNode -} - -type ContainerNode struct { - text string - flag types.FlagType - codeLocation types.CodeLocation - - setupNodes []leafnodes.BasicNode - subjectAndContainerNodes []subjectOrContainerNode -} - -func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { - return &ContainerNode{ - text: text, - flag: flag, - codeLocation: codeLocation, - } -} - -func (container *ContainerNode) Shuffle(r *rand.Rand) { - sort.Sort(container) - permutation := r.Perm(len(container.subjectAndContainerNodes)) - shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) - for i, j := range permutation { - shuffledNodes[i] = container.subjectAndContainerNodes[j] - } - container.subjectAndContainerNodes = shuffledNodes -} - -func (node *ContainerNode) BackPropagateProgrammaticFocus() bool { - if node.flag == types.FlagTypePending { - return false - } - - shouldUnfocus := false - for _, subjectOrContainerNode := range node.subjectAndContainerNodes { - if subjectOrContainerNode.containerNode != nil { - shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus - } else { - shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus - } - } - - if shouldUnfocus { - if node.flag == types.FlagTypeFocused { - node.flag = types.FlagTypeNone - } - return true - } - - return node.flag == types.FlagTypeFocused -} - -func (node *ContainerNode) Collate() []CollatedNodes { - return node.collate([]*ContainerNode{}) -} - -func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { - collated := make([]CollatedNodes, 0) - - containers := make([]*ContainerNode, len(enclosingContainers)) - copy(containers, enclosingContainers) - containers = append(containers, node) - - for _, subjectOrContainer := range node.subjectAndContainerNodes { - if subjectOrContainer.containerNode != nil { - collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) 
- } else { - collated = append(collated, CollatedNodes{ - Containers: containers, - Subject: subjectOrContainer.subjectNode, - }) - } - } - - return collated -} - -func (node *ContainerNode) PushContainerNode(container *ContainerNode) { - node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) -} - -func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { - node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) -} - -func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { - node.setupNodes = append(node.setupNodes, setupNode) -} - -func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { - nodes := []leafnodes.BasicNode{} - for _, setupNode := range node.setupNodes { - if setupNode.Type() == nodeType { - nodes = append(nodes, setupNode) - } - } - return nodes -} - -func (node *ContainerNode) Text() string { - return node.text -} - -func (node *ContainerNode) CodeLocation() types.CodeLocation { - return node.codeLocation -} - -func (node *ContainerNode) Flag() types.FlagType { - return node.flag -} - -//sort.Interface - -func (node *ContainerNode) Len() int { - return len(node.subjectAndContainerNodes) -} - -func (node *ContainerNode) Less(i, j int) bool { - return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() -} - -func (node *ContainerNode) Swap(i, j int) { - node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] -} diff --git a/vendor/github.com/onsi/ginkgo/internal/global/init.go b/vendor/github.com/onsi/ginkgo/internal/global/init.go deleted file mode 100644 index 109f617a..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/global/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package global - -import ( - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/internal/suite" -) - -const DefaultTimeout = time.Duration(1 * time.Second) - -var Suite *suite.Suite -var Failer *failer.Failer - -func init() { - InitializeGlobals() -} - -func InitializeGlobals() { - Failer = failer.New() - Suite = suite.New(Failer) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go deleted file mode 100644 index 393901e1..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go +++ /dev/null @@ -1,103 +0,0 @@ -package leafnodes - -import ( - "math" - "time" - - "sync" - - "github.com/onsi/ginkgo/types" -) - -type benchmarker struct { - mu sync.Mutex - measurements map[string]*types.SpecMeasurement - orderCounter int -} - -func newBenchmarker() *benchmarker { - return &benchmarker{ - measurements: make(map[string]*types.SpecMeasurement), - } -} - -func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { - t := time.Now() - body() - elapsedTime = time.Since(t) - - b.mu.Lock() - defer b.mu.Unlock() - measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...) - measurement.Results = append(measurement.Results, elapsedTime.Seconds()) - - return -} - -func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { - b.mu.Lock() - measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) 
- defer b.mu.Unlock() - measurement.Results = append(measurement.Results, value) -} - -func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) { - b.mu.Lock() - measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) - defer b.mu.Unlock() - measurement.Results = append(measurement.Results, value) -} - -func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement { - measurement, ok := b.measurements[name] - if !ok { - var computedInfo interface{} - computedInfo = nil - if len(info) > 0 { - computedInfo = info[0] - } - measurement = &types.SpecMeasurement{ - Name: name, - Info: computedInfo, - Order: b.orderCounter, - SmallestLabel: smallestLabel, - LargestLabel: largestLabel, - AverageLabel: averageLabel, - Units: units, - Precision: precision, - Results: make([]float64, 0), - } - b.measurements[name] = measurement - b.orderCounter++ - } - - return measurement -} - -func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { - b.mu.Lock() - defer b.mu.Unlock() - for _, measurement := range b.measurements { - measurement.Smallest = math.MaxFloat64 - measurement.Largest = -math.MaxFloat64 - sum := float64(0) - sumOfSquares := float64(0) - - for _, result := range measurement.Results { - if result > measurement.Largest { - measurement.Largest = result - } - if result < measurement.Smallest { - measurement.Smallest = result - } - sum += result - sumOfSquares += result * result - } - - n := float64(len(measurement.Results)) - measurement.Average = sum / n - measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) - } - - return b.measurements -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go deleted file mode 100644 index 8c3902d6..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go +++ /dev/null @@ -1,19 +0,0 @@ -package leafnodes - -import ( - "github.com/onsi/ginkgo/types" -) - -type BasicNode interface { - Type() types.SpecComponentType - Run() (types.SpecState, types.SpecFailure) - CodeLocation() types.CodeLocation -} - -type SubjectNode interface { - BasicNode - - Text() string - Flag() types.FlagType - Samples() int -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go deleted file mode 100644 index 6eded7b7..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go +++ /dev/null @@ -1,47 +0,0 @@ -package leafnodes - -import ( - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type ItNode struct { - runner *runner - - flag types.FlagType - text string -} - -func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { - return &ItNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), - flag: flag, - text: text, - } -} - -func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *ItNode) Type() types.SpecComponentType { - return types.SpecComponentTypeIt -} - -func (node *ItNode) Text() string { - return node.text -} - -func (node 
*ItNode) Flag() types.FlagType { - return node.flag -} - -func (node *ItNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func (node *ItNode) Samples() int { - return 1 -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go deleted file mode 100644 index 3ab9a6d5..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go +++ /dev/null @@ -1,62 +0,0 @@ -package leafnodes - -import ( - "reflect" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type MeasureNode struct { - runner *runner - - text string - flag types.FlagType - samples int - benchmarker *benchmarker -} - -func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { - benchmarker := newBenchmarker() - - wrappedBody := func() { - reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) - } - - return &MeasureNode{ - runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), - - text: text, - flag: flag, - samples: samples, - benchmarker: benchmarker, - } -} - -func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { - return node.benchmarker.measurementsReport() -} - -func (node *MeasureNode) Type() types.SpecComponentType { - return types.SpecComponentTypeMeasure -} - -func (node *MeasureNode) Text() string { - return node.text -} - -func (node *MeasureNode) Flag() types.FlagType { - return node.flag -} - -func (node *MeasureNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func (node *MeasureNode) Samples() int { - return node.samples -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go deleted file mode 100644 index 16cb66c3..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go +++ /dev/null @@ -1,117 +0,0 @@ -package leafnodes - -import ( - "fmt" - "reflect" - "time" - - "github.com/onsi/ginkgo/internal/codelocation" - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type runner struct { - isAsync bool - asyncFunc func(chan<- interface{}) - syncFunc func() - codeLocation types.CodeLocation - timeoutThreshold time.Duration - nodeType types.SpecComponentType - componentIndex int - failer *failer.Failer -} - -func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { - bodyType := reflect.TypeOf(body) - if bodyType.Kind() != reflect.Func { - panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) - } - - runner := &runner{ - codeLocation: codeLocation, - timeoutThreshold: timeout, - failer: failer, - nodeType: nodeType, - componentIndex: componentIndex, - } - - switch bodyType.NumIn() { - case 0: - runner.syncFunc = body.(func()) - return runner - case 1: - if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { - panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) - } - - wrappedBody := func(done chan<- interface{}) { - bodyValue := reflect.ValueOf(body) - 
bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) - } - - runner.isAsync = true - runner.asyncFunc = wrappedBody - return runner - } - - panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) -} - -func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) { - if r.isAsync { - return r.runAsync() - } else { - return r.runSync() - } -} - -func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { - done := make(chan interface{}, 1) - - go func() { - finished := false - - defer func() { - if e := recover(); e != nil || !finished { - r.failer.Panic(codelocation.New(2), e) - select { - case <-done: - break - default: - close(done) - } - } - }() - - r.asyncFunc(done) - finished = true - }() - - // If this goroutine gets no CPU time before the select block, - // the <-done case may complete even if the test took longer than the timeoutThreshold. - // This can cause flaky behaviour, but we haven't seen it in the wild. - select { - case <-done: - case <-time.After(r.timeoutThreshold): - r.failer.Timeout(r.codeLocation) - } - - failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) - return -} -func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { - finished := false - - defer func() { - if e := recover(); e != nil || !finished { - r.failer.Panic(codelocation.New(2), e) - } - - failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) - }() - - r.syncFunc() - finished = true - - return -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go deleted file mode 100644 index e3e9cb7c..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go +++ /dev/null @@ -1,48 +0,0 @@ -package leafnodes - -import ( - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type SetupNode struct { - runner *runner -} - -func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *SetupNode) Type() types.SpecComponentType { - return node.runner.nodeType -} - -func (node *SetupNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), - } -} - -func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex), - } -} - -func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), - } -} - -func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex), - } -} diff --git 
a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go deleted file mode 100644 index 80f16ed7..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go +++ /dev/null @@ -1,55 +0,0 @@ -package leafnodes - -import ( - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type SuiteNode interface { - Run(parallelNode int, parallelTotal int, syncHost string) bool - Passed() bool - Summary() *types.SetupSummary -} - -type simpleSuiteNode struct { - runner *runner - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - t := time.Now() - node.outcome, node.failure = node.runner.run() - node.runTime = time.Since(t) - - return node.outcome == types.SpecStatePassed -} - -func (node *simpleSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *simpleSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runner.nodeType, - CodeLocation: node.runner.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &simpleSuiteNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), - } -} - -func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &simpleSuiteNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - } -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go deleted file mode 100644 index a721d0cf..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go +++ /dev/null @@ -1,90 +0,0 @@ -package leafnodes - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type synchronizedAfterSuiteNode struct { - runnerA *runner - runnerB *runner - - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &synchronizedAfterSuiteNode{ - runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - } -} - -func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - node.outcome, node.failure = node.runnerA.run() - - if parallelNode == 1 { - if parallelTotal > 1 { - node.waitUntilOtherNodesAreDone(syncHost) - } - - outcome, failure := node.runnerB.run() - - if node.outcome == types.SpecStatePassed { - node.outcome, node.failure = outcome, failure - } - } - - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedAfterSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { - return 
&types.SetupSummary{ - ComponentType: node.runnerA.nodeType, - CodeLocation: node.runnerA.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { - for { - if node.canRun(syncHost) { - return - } - - time.Sleep(50 * time.Millisecond) - } -} - -func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { - resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") - if err != nil || resp.StatusCode != http.StatusOK { - return false - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return false - } - resp.Body.Close() - - afterSuiteData := types.RemoteAfterSuiteData{} - err = json.Unmarshal(body, &afterSuiteData) - if err != nil { - return false - } - - return afterSuiteData.CanRun -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go deleted file mode 100644 index d5c88931..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go +++ /dev/null @@ -1,181 +0,0 @@ -package leafnodes - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "reflect" - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type synchronizedBeforeSuiteNode struct { - runnerA *runner - runnerB *runner - - data []byte - - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - node := &synchronizedBeforeSuiteNode{} - - node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) - node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) - - return node -} - -func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - t := time.Now() - defer func() { - node.runTime = time.Since(t) - }() - - if parallelNode == 1 { - node.outcome, node.failure = node.runA(parallelTotal, syncHost) - } else { - node.outcome, node.failure = node.waitForA(syncHost) - } - - if node.outcome != types.SpecStatePassed { - return false - } - node.outcome, node.failure = node.runnerB.run() - - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { - outcome, failure := node.runnerA.run() - - if parallelTotal > 1 { - state := types.RemoteBeforeSuiteStatePassed - if outcome != types.SpecStatePassed { - state = types.RemoteBeforeSuiteStateFailed - } - json := (types.RemoteBeforeSuiteData{ - Data: node.data, - State: state, - }).ToJSON() - http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) - } - - return outcome, failure -} - -func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { - failure := func(message string) types.SpecFailure { - return types.SpecFailure{ - Message: message, - Location: node.runnerA.codeLocation, - ComponentType: node.runnerA.nodeType, - ComponentIndex: node.runnerA.componentIndex, - ComponentCodeLocation: node.runnerA.codeLocation, - } - } - for { - resp, err := http.Get(syncHost + "/BeforeSuiteState") - if err != nil || 
resp.StatusCode != http.StatusOK { - return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return types.SpecStateFailed, failure("Failed to read BeforeSuite state") - } - resp.Body.Close() - - beforeSuiteData := types.RemoteBeforeSuiteData{} - err = json.Unmarshal(body, &beforeSuiteData) - if err != nil { - return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") - } - - switch beforeSuiteData.State { - case types.RemoteBeforeSuiteStatePassed: - node.data = beforeSuiteData.Data - return types.SpecStatePassed, types.SpecFailure{} - case types.RemoteBeforeSuiteStateFailed: - return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") - case types.RemoteBeforeSuiteStateDisappeared: - return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") - } - - time.Sleep(50 * time.Millisecond) - } -} - -func (node *synchronizedBeforeSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runnerA.nodeType, - CodeLocation: node.runnerA.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { - typeA := reflect.TypeOf(bodyA) - if typeA.Kind() != reflect.Func { - panic("SynchronizedBeforeSuite expects a function as its first argument") - } - - takesNothing := typeA.NumIn() == 0 - takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface - returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 - - if !((takesNothing || takesADoneChannel) && returnsBytes) { - panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") - } - - if takesADoneChannel { - return func(done chan<- interface{}) { - out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) - node.data = out[0].Interface().([]byte) - } - } - - return func() { - out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) - node.data = out[0].Interface().([]byte) - } -} - -func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} { - typeB := reflect.TypeOf(bodyB) - if typeB.Kind() != reflect.Func { - panic("SynchronizedBeforeSuite expects a function as its second argument") - } - - returnsNothing := typeB.NumOut() == 0 - takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 - takesBytesAndDone := typeB.NumIn() == 2 && - typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && - typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface - - if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { - panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") - } - - if takesBytesAndDone { - return func(done chan<- interface{}) { - reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) - } - } - - return func() { - reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) - } -} diff --git 
a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go deleted file mode 100644 index 992437d9..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ /dev/null @@ -1,249 +0,0 @@ -/* - -Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output -coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: - - ginkgo -nodes=N - -where N is the number of nodes you desire. -*/ -package remote - -import ( - "time" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters/stenographer" - "github.com/onsi/ginkgo/types" -) - -type configAndSuite struct { - config config.GinkgoConfigType - summary *types.SuiteSummary -} - -type Aggregator struct { - nodeCount int - config config.DefaultReporterConfigType - stenographer stenographer.Stenographer - result chan bool - - suiteBeginnings chan configAndSuite - aggregatedSuiteBeginnings []configAndSuite - - beforeSuites chan *types.SetupSummary - aggregatedBeforeSuites []*types.SetupSummary - - afterSuites chan *types.SetupSummary - aggregatedAfterSuites []*types.SetupSummary - - specCompletions chan *types.SpecSummary - completedSpecs []*types.SpecSummary - - suiteEndings chan *types.SuiteSummary - aggregatedSuiteEndings []*types.SuiteSummary - specs []*types.SpecSummary - - startTime time.Time -} - -func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { - aggregator := &Aggregator{ - nodeCount: nodeCount, - result: result, - config: config, - stenographer: stenographer, - - suiteBeginnings: make(chan configAndSuite), - beforeSuites: make(chan *types.SetupSummary), - afterSuites: make(chan *types.SetupSummary), - specCompletions: make(chan *types.SpecSummary), - suiteEndings: make(chan *types.SuiteSummary), - } - - go aggregator.mux() - - return aggregator -} - -func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - aggregator.suiteBeginnings <- configAndSuite{config, summary} -} - -func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.beforeSuites <- setupSummary -} - -func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.afterSuites <- setupSummary -} - -func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { - //noop -} - -func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { - aggregator.specCompletions <- specSummary -} - -func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { - aggregator.suiteEndings <- summary -} - -func (aggregator *Aggregator) mux() { -loop: - for { - select { - case configAndSuite := <-aggregator.suiteBeginnings: - aggregator.registerSuiteBeginning(configAndSuite) - case setupSummary := <-aggregator.beforeSuites: - aggregator.registerBeforeSuite(setupSummary) - case setupSummary := <-aggregator.afterSuites: - aggregator.registerAfterSuite(setupSummary) - case specSummary := <-aggregator.specCompletions: - aggregator.registerSpecCompletion(specSummary) - case suite := <-aggregator.suiteEndings: - finished, passed := aggregator.registerSuiteEnding(suite) - if finished { - aggregator.result <- passed - break loop - } - } - } -} - -func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { - 
aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) - - if len(aggregator.aggregatedSuiteBeginnings) == 1 { - aggregator.startTime = time.Now() - } - - if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { - return - } - - aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) - - totalNumberOfSpecs := 0 - if len(aggregator.aggregatedSuiteBeginnings) > 0 { - totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization - } - - aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) - aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { - aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { - aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { - aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) - aggregator.specs = append(aggregator.specs, specSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) flushCompletedSpecs() { - if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { - return - } - - for _, setupSummary := range aggregator.aggregatedBeforeSuites { - aggregator.announceBeforeSuite(setupSummary) - } - - for _, specSummary := range aggregator.completedSpecs { - aggregator.announceSpec(specSummary) - } - - for _, setupSummary := range aggregator.aggregatedAfterSuites { - aggregator.announceAfterSuite(setupSummary) - } - - aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} - aggregator.completedSpecs = []*types.SpecSummary{} - aggregator.aggregatedAfterSuites = []*types.SetupSummary{} -} - -func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { - aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) - if setupSummary.State != types.SpecStatePassed { - aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { - aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) - if setupSummary.State != types.SpecStatePassed { - aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { - if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { - aggregator.stenographer.AnnounceSpecWillRun(specSummary) - } - - aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) - - switch specSummary.State { - case types.SpecStatePassed: - if specSummary.IsMeasurement { - aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct) - } else if specSummary.RunTime.Seconds() >= 
aggregator.config.SlowSpecThreshold { - aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct) - } else { - aggregator.stenographer.AnnounceSuccessfulSpec(specSummary) - } - - case types.SpecStatePending: - aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) - case types.SpecStateSkipped: - aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace) - case types.SpecStateTimedOut: - aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - case types.SpecStatePanicked: - aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - case types.SpecStateFailed: - aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { - aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) - if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { - return false, false - } - - aggregatedSuiteSummary := &types.SuiteSummary{} - aggregatedSuiteSummary.SuiteSucceeded = true - - for _, suiteSummary := range aggregator.aggregatedSuiteEndings { - if !suiteSummary.SuiteSucceeded { - aggregatedSuiteSummary.SuiteSucceeded = false - } - - aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun - aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs - aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs - aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs - aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs - aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs - aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs - } - - aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) - - aggregator.stenographer.SummarizeFailures(aggregator.specs) - aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) - - return true, aggregatedSuiteSummary.SuiteSucceeded -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go deleted file mode 100644 index 284bc62e..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go +++ /dev/null @@ -1,147 +0,0 @@ -package remote - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - - "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/reporters/stenographer" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -//An interface to net/http's client to allow the injection of fakes under test -type Poster interface { - Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) -} - -/* -The ForwardingReporter is a Ginkgo reporter that forwards information to -a Ginkgo remote server. - -When streaming parallel test output, this repoter is automatically installed by Ginkgo. 
- -This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner -detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter -in place of Ginkgo's DefaultReporter. -*/ - -type ForwardingReporter struct { - serverHost string - poster Poster - outputInterceptor OutputInterceptor - debugMode bool - debugFile *os.File - nestedReporter *reporters.DefaultReporter -} - -func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter { - reporter := &ForwardingReporter{ - serverHost: serverHost, - poster: poster, - outputInterceptor: outputInterceptor, - } - - if debugFile != "" { - var err error - reporter.debugMode = true - reporter.debugFile, err = os.Create(debugFile) - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - - if !config.Verbose { - //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication. - ginkgoWriter.AndRedirectTo(reporter.debugFile) - } - outputInterceptor.StreamTo(reporter.debugFile) //This is not working - - stenographer := stenographer.New(false, true, reporter.debugFile) - config.Succinct = false - config.Verbose = true - config.FullTrace = true - reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer) - } - - return reporter -} - -func (reporter *ForwardingReporter) post(path string, data interface{}) { - encoded, _ := json.Marshal(data) - buffer := bytes.NewBuffer(encoded) - reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) -} - -func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { - data := struct { - Config config.GinkgoConfigType `json:"config"` - Summary *types.SuiteSummary `json:"suite-summary"` - }{ - conf, - summary, - } - - reporter.outputInterceptor.StartInterceptingOutput() - if reporter.debugMode { - reporter.nestedReporter.SpecSuiteWillBegin(conf, summary) - reporter.debugFile.Sync() - } - reporter.post("/SpecSuiteWillBegin", data) -} - -func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - setupSummary.CapturedOutput = output - if reporter.debugMode { - reporter.nestedReporter.BeforeSuiteDidRun(setupSummary) - reporter.debugFile.Sync() - } - reporter.post("/BeforeSuiteDidRun", setupSummary) -} - -func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { - if reporter.debugMode { - reporter.nestedReporter.SpecWillRun(specSummary) - reporter.debugFile.Sync() - } - reporter.post("/SpecWillRun", specSummary) -} - -func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - specSummary.CapturedOutput = output - if reporter.debugMode { - reporter.nestedReporter.SpecDidComplete(specSummary) - reporter.debugFile.Sync() - } - reporter.post("/SpecDidComplete", specSummary) -} - -func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - 
reporter.outputInterceptor.StartInterceptingOutput() - setupSummary.CapturedOutput = output - if reporter.debugMode { - reporter.nestedReporter.AfterSuiteDidRun(setupSummary) - reporter.debugFile.Sync() - } - reporter.post("/AfterSuiteDidRun", setupSummary) -} - -func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - reporter.outputInterceptor.StopInterceptingAndReturnOutput() - if reporter.debugMode { - reporter.nestedReporter.SpecSuiteDidEnd(summary) - reporter.debugFile.Sync() - } - reporter.post("/SpecSuiteDidEnd", summary) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go deleted file mode 100644 index 5154abe8..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go +++ /dev/null @@ -1,13 +0,0 @@ -package remote - -import "os" - -/* -The OutputInterceptor is used by the ForwardingReporter to -intercept and capture all stdin and stderr output during a test run. -*/ -type OutputInterceptor interface { - StartInterceptingOutput() error - StopInterceptingAndReturnOutput() (string, error) - StreamTo(*os.File) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go deleted file mode 100644 index 774967db..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build freebsd openbsd netbsd dragonfly darwin linux solaris - -package remote - -import ( - "errors" - "io/ioutil" - "os" - - "github.com/nxadm/tail" - "golang.org/x/sys/unix" -) - -func NewOutputInterceptor() OutputInterceptor { - return &outputInterceptor{} -} - -type outputInterceptor struct { - redirectFile *os.File - streamTarget *os.File - intercepting bool - tailer *tail.Tail - doneTailing chan bool -} - -func (interceptor *outputInterceptor) StartInterceptingOutput() error { - if interceptor.intercepting { - return errors.New("Already intercepting output!") - } - interceptor.intercepting = true - - var err error - - interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output") - if err != nil { - return err - } - - // This might call Dup3 if the dup2 syscall is not available, e.g. 
on - // linux/arm64 or linux/riscv64 - unix.Dup2(int(interceptor.redirectFile.Fd()), 1) - unix.Dup2(int(interceptor.redirectFile.Fd()), 2) - - if interceptor.streamTarget != nil { - interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) - interceptor.doneTailing = make(chan bool) - - go func() { - for line := range interceptor.tailer.Lines { - interceptor.streamTarget.Write([]byte(line.Text + "\n")) - } - close(interceptor.doneTailing) - }() - } - - return nil -} - -func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { - if !interceptor.intercepting { - return "", errors.New("Not intercepting output!") - } - - interceptor.redirectFile.Close() - output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) - os.Remove(interceptor.redirectFile.Name()) - - interceptor.intercepting = false - - if interceptor.streamTarget != nil { - interceptor.tailer.Stop() - interceptor.tailer.Cleanup() - <-interceptor.doneTailing - interceptor.streamTarget.Sync() - } - - return string(output), err -} - -func (interceptor *outputInterceptor) StreamTo(out *os.File) { - interceptor.streamTarget = out -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go deleted file mode 100644 index 40c79033..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build windows - -package remote - -import ( - "errors" - "os" -) - -func NewOutputInterceptor() OutputInterceptor { - return &outputInterceptor{} -} - -type outputInterceptor struct { - intercepting bool -} - -func (interceptor *outputInterceptor) StartInterceptingOutput() error { - if interceptor.intercepting { - return errors.New("Already intercepting output!") - } - interceptor.intercepting = true - - // not working on windows... - - return nil -} - -func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { - // not working on windows... - interceptor.intercepting = false - - return "", nil -} - -func (interceptor *outputInterceptor) StreamTo(*os.File) {} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go deleted file mode 100644 index 93e9dac0..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go +++ /dev/null @@ -1,224 +0,0 @@ -/* - -The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. -This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). - -*/ - -package remote - -import ( - "encoding/json" - "io/ioutil" - "net" - "net/http" - "sync" - - "github.com/onsi/ginkgo/internal/spec_iterator" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" -) - -/* -Server spins up on an automatically selected port and listens for communication from the forwarding reporter. -It then forwards that communication to attached reporters. 
-*/ -type Server struct { - listener net.Listener - reporters []reporters.Reporter - alives []func() bool - lock *sync.Mutex - beforeSuiteData types.RemoteBeforeSuiteData - parallelTotal int - counter int -} - -//Create a new server, automatically selecting a port -func NewServer(parallelTotal int) (*Server, error) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return nil, err - } - return &Server{ - listener: listener, - lock: &sync.Mutex{}, - alives: make([]func() bool, parallelTotal), - beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}, - parallelTotal: parallelTotal, - }, nil -} - -//Start the server. You don't need to `go s.Start()`, just `s.Start()` -func (server *Server) Start() { - httpServer := &http.Server{} - mux := http.NewServeMux() - httpServer.Handler = mux - - //streaming endpoints - mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) - mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) - mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) - mux.HandleFunc("/SpecWillRun", server.specWillRun) - mux.HandleFunc("/SpecDidComplete", server.specDidComplete) - mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) - - //synchronization endpoints - mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) - mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) - mux.HandleFunc("/counter", server.handleCounter) - mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility - - go httpServer.Serve(server.listener) -} - -//Stop the server -func (server *Server) Close() { - server.listener.Close() -} - -//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
-func (server *Server) Address() string { - return "http://" + server.listener.Addr().String() -} - -// -// Streaming Endpoints -// - -//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` -func (server *Server) readAll(request *http.Request) []byte { - defer request.Body.Close() - body, _ := ioutil.ReadAll(request.Body) - return body -} - -func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { - server.reporters = reporters -} - -func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - - var data struct { - Config config.GinkgoConfigType `json:"config"` - Summary *types.SuiteSummary `json:"suite-summary"` - } - - json.Unmarshal(body, &data) - - for _, reporter := range server.reporters { - reporter.SpecSuiteWillBegin(data.Config, data.Summary) - } -} - -func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var setupSummary *types.SetupSummary - json.Unmarshal(body, &setupSummary) - - for _, reporter := range server.reporters { - reporter.BeforeSuiteDidRun(setupSummary) - } -} - -func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var setupSummary *types.SetupSummary - json.Unmarshal(body, &setupSummary) - - for _, reporter := range server.reporters { - reporter.AfterSuiteDidRun(setupSummary) - } -} - -func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var specSummary *types.SpecSummary - json.Unmarshal(body, &specSummary) - - for _, reporter := range server.reporters { - reporter.SpecWillRun(specSummary) - } -} - -func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var specSummary *types.SpecSummary - json.Unmarshal(body, &specSummary) - - for _, reporter := range server.reporters { - reporter.SpecDidComplete(specSummary) - } -} - -func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var suiteSummary *types.SuiteSummary - json.Unmarshal(body, &suiteSummary) - - for _, reporter := range server.reporters { - reporter.SpecSuiteDidEnd(suiteSummary) - } -} - -// -// Synchronization Endpoints -// - -func (server *Server) RegisterAlive(node int, alive func() bool) { - server.lock.Lock() - defer server.lock.Unlock() - server.alives[node-1] = alive -} - -func (server *Server) nodeIsAlive(node int) bool { - server.lock.Lock() - defer server.lock.Unlock() - alive := server.alives[node-1] - if alive == nil { - return true - } - return alive() -} - -func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { - if request.Method == "POST" { - dec := json.NewDecoder(request.Body) - dec.Decode(&(server.beforeSuiteData)) - } else { - beforeSuiteData := server.beforeSuiteData - if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { - beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared - } - enc := json.NewEncoder(writer) - enc.Encode(beforeSuiteData) - } -} - -func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { - afterSuiteData := types.RemoteAfterSuiteData{ - CanRun: true, - } - for i := 2; i <= server.parallelTotal; i++ { - afterSuiteData.CanRun = 
afterSuiteData.CanRun && !server.nodeIsAlive(i) - } - - enc := json.NewEncoder(writer) - enc.Encode(afterSuiteData) -} - -func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) { - c := spec_iterator.Counter{} - server.lock.Lock() - c.Index = server.counter - server.counter++ - server.lock.Unlock() - - json.NewEncoder(writer).Encode(c) -} - -func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) { - writer.Write([]byte("")) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go deleted file mode 100644 index 6eef40a0..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go +++ /dev/null @@ -1,247 +0,0 @@ -package spec - -import ( - "fmt" - "io" - "time" - - "sync" - - "github.com/onsi/ginkgo/internal/containernode" - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/types" -) - -type Spec struct { - subject leafnodes.SubjectNode - focused bool - announceProgress bool - - containers []*containernode.ContainerNode - - state types.SpecState - runTime time.Duration - startTime time.Time - failure types.SpecFailure - previousFailures bool - - stateMutex *sync.Mutex -} - -func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { - spec := &Spec{ - subject: subject, - containers: containers, - focused: subject.Flag() == types.FlagTypeFocused, - announceProgress: announceProgress, - stateMutex: &sync.Mutex{}, - } - - spec.processFlag(subject.Flag()) - for i := len(containers) - 1; i >= 0; i-- { - spec.processFlag(containers[i].Flag()) - } - - return spec -} - -func (spec *Spec) processFlag(flag types.FlagType) { - if flag == types.FlagTypeFocused { - spec.focused = true - } else if flag == types.FlagTypePending { - spec.setState(types.SpecStatePending) - } -} - -func (spec *Spec) Skip() { - spec.setState(types.SpecStateSkipped) -} - -func (spec *Spec) Failed() bool { - return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut -} - -func (spec *Spec) Passed() bool { - return spec.getState() == types.SpecStatePassed -} - -func (spec *Spec) Flaked() bool { - return spec.getState() == types.SpecStatePassed && spec.previousFailures -} - -func (spec *Spec) Pending() bool { - return spec.getState() == types.SpecStatePending -} - -func (spec *Spec) Skipped() bool { - return spec.getState() == types.SpecStateSkipped -} - -func (spec *Spec) Focused() bool { - return spec.focused -} - -func (spec *Spec) IsMeasurement() bool { - return spec.subject.Type() == types.SpecComponentTypeMeasure -} - -func (spec *Spec) Summary(suiteID string) *types.SpecSummary { - componentTexts := make([]string, len(spec.containers)+1) - componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) - - for i, container := range spec.containers { - componentTexts[i] = container.Text() - componentCodeLocations[i] = container.CodeLocation() - } - - componentTexts[len(spec.containers)] = spec.subject.Text() - componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() - - runTime := spec.runTime - if runTime == 0 && !spec.startTime.IsZero() { - runTime = time.Since(spec.startTime) - } - - return &types.SpecSummary{ - IsMeasurement: spec.IsMeasurement(), - NumberOfSamples: spec.subject.Samples(), - ComponentTexts: componentTexts, - ComponentCodeLocations: componentCodeLocations, - State: spec.getState(), - 
RunTime: runTime, - Failure: spec.failure, - Measurements: spec.measurementsReport(), - SuiteID: suiteID, - } -} - -func (spec *Spec) ConcatenatedString() string { - s := "" - for _, container := range spec.containers { - s += container.Text() + " " - } - - return s + spec.subject.Text() -} - -func (spec *Spec) Run(writer io.Writer) { - if spec.getState() == types.SpecStateFailed { - spec.previousFailures = true - } - - spec.startTime = time.Now() - defer func() { - spec.runTime = time.Since(spec.startTime) - }() - - for sample := 0; sample < spec.subject.Samples(); sample++ { - spec.runSample(sample, writer) - - if spec.getState() != types.SpecStatePassed { - return - } - } -} - -func (spec *Spec) getState() types.SpecState { - spec.stateMutex.Lock() - defer spec.stateMutex.Unlock() - return spec.state -} - -func (spec *Spec) setState(state types.SpecState) { - spec.stateMutex.Lock() - defer spec.stateMutex.Unlock() - spec.state = state -} - -func (spec *Spec) runSample(sample int, writer io.Writer) { - spec.setState(types.SpecStatePassed) - spec.failure = types.SpecFailure{} - innerMostContainerIndexToUnwind := -1 - - defer func() { - for i := innerMostContainerIndexToUnwind; i >= 0; i-- { - container := spec.containers[i] - for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) { - spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach) - justAfterEachState, justAfterEachFailure := justAfterEach.Run() - if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { - spec.state = justAfterEachState - spec.failure = justAfterEachFailure - } - } - } - - for i := innerMostContainerIndexToUnwind; i >= 0; i-- { - container := spec.containers[i] - for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { - spec.announceSetupNode(writer, "AfterEach", container, afterEach) - afterEachState, afterEachFailure := afterEach.Run() - if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed { - spec.setState(afterEachState) - spec.failure = afterEachFailure - } - } - } - }() - - for i, container := range spec.containers { - innerMostContainerIndexToUnwind = i - for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { - spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) - s, f := beforeEach.Run() - spec.failure = f - spec.setState(s) - if spec.getState() != types.SpecStatePassed { - return - } - } - } - - for _, container := range spec.containers { - for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { - spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) - s, f := justBeforeEach.Run() - spec.failure = f - spec.setState(s) - if spec.getState() != types.SpecStatePassed { - return - } - } - } - - spec.announceSubject(writer, spec.subject) - s, f := spec.subject.Run() - spec.failure = f - spec.setState(s) -} - -func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { - if spec.announceProgress { - s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String()) - writer.Write([]byte(s)) - } -} - -func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) { - if spec.announceProgress { - nodeType := "" - switch subject.Type() { - case types.SpecComponentTypeIt: - nodeType = "It" - case 
types.SpecComponentTypeMeasure: - nodeType = "Measure" - } - s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String()) - writer.Write([]byte(s)) - } -} - -func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { - if !spec.IsMeasurement() || spec.Failed() { - return map[string]*types.SpecMeasurement{} - } - - return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go deleted file mode 100644 index 0a24139f..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ /dev/null @@ -1,144 +0,0 @@ -package spec - -import ( - "math/rand" - "regexp" - "sort" - "strings" -) - -type Specs struct { - specs []*Spec - names []string - - hasProgrammaticFocus bool - RegexScansFilePath bool -} - -func NewSpecs(specs []*Spec) *Specs { - names := make([]string, len(specs)) - for i, spec := range specs { - names[i] = spec.ConcatenatedString() - } - return &Specs{ - specs: specs, - names: names, - } -} - -func (e *Specs) Specs() []*Spec { - return e.specs -} - -func (e *Specs) HasProgrammaticFocus() bool { - return e.hasProgrammaticFocus -} - -func (e *Specs) Shuffle(r *rand.Rand) { - sort.Sort(e) - permutation := r.Perm(len(e.specs)) - shuffledSpecs := make([]*Spec, len(e.specs)) - names := make([]string, len(e.specs)) - for i, j := range permutation { - shuffledSpecs[i] = e.specs[j] - names[i] = e.names[j] - } - e.specs = shuffledSpecs - e.names = names -} - -func (e *Specs) ApplyFocus(description string, focus, skip []string) { - if len(focus)+len(skip) == 0 { - e.applyProgrammaticFocus() - } else { - e.applyRegExpFocusAndSkip(description, focus, skip) - } -} - -func (e *Specs) applyProgrammaticFocus() { - e.hasProgrammaticFocus = false - for _, spec := range e.specs { - if spec.Focused() && !spec.Pending() { - e.hasProgrammaticFocus = true - break - } - } - - if e.hasProgrammaticFocus { - for _, spec := range e.specs { - if !spec.Focused() { - spec.Skip() - } - } - } -} - -// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function, -// this is the place which we append to. 
-func (e *Specs) toMatch(description string, i int) []byte { - if i > len(e.names) { - return nil - } - if e.RegexScansFilePath { - return []byte( - description + " " + - e.names[i] + " " + - e.specs[i].subject.CodeLocation().FileName) - } else { - return []byte( - description + " " + - e.names[i]) - } -} - -func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) { - var focusFilter, skipFilter *regexp.Regexp - if len(focus) > 0 { - focusFilter = regexp.MustCompile(strings.Join(focus, "|")) - } - if len(skip) > 0 { - skipFilter = regexp.MustCompile(strings.Join(skip, "|")) - } - - for i, spec := range e.specs { - matchesFocus := true - matchesSkip := false - - toMatch := e.toMatch(description, i) - - if focusFilter != nil { - matchesFocus = focusFilter.Match(toMatch) - } - - if skipFilter != nil { - matchesSkip = skipFilter.Match(toMatch) - } - - if !matchesFocus || matchesSkip { - spec.Skip() - } - } -} - -func (e *Specs) SkipMeasurements() { - for _, spec := range e.specs { - if spec.IsMeasurement() { - spec.Skip() - } - } -} - -//sort.Interface - -func (e *Specs) Len() int { - return len(e.specs) -} - -func (e *Specs) Less(i, j int) bool { - return e.names[i] < e.names[j] -} - -func (e *Specs) Swap(i, j int) { - e.names[i], e.names[j] = e.names[j], e.names[i] - e.specs[i], e.specs[j] = e.specs[j], e.specs[i] -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go deleted file mode 100644 index 82272554..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go +++ /dev/null @@ -1,55 +0,0 @@ -package spec_iterator - -func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { - if length == 0 { - return 0, 0 - } - - // We have more nodes than tests. Trivial case. 
- if parallelTotal >= length { - if parallelNode > length { - return 0, 0 - } else { - return parallelNode - 1, 1 - } - } - - // This is the minimum amount of tests that a node will be required to run - minTestsPerNode := length / parallelTotal - - // This is the maximum amount of tests that a node will be required to run - // The algorithm guarantees that this would be equal to at least the minimum amount - // and at most one more - maxTestsPerNode := minTestsPerNode - if length%parallelTotal != 0 { - maxTestsPerNode++ - } - - // Number of nodes that will have to run the maximum amount of tests per node - numMaxLoadNodes := length % parallelTotal - - // Number of nodes that precede the current node and will have to run the maximum amount of tests per node - var numPrecedingMaxLoadNodes int - if parallelNode > numMaxLoadNodes { - numPrecedingMaxLoadNodes = numMaxLoadNodes - } else { - numPrecedingMaxLoadNodes = parallelNode - 1 - } - - // Number of nodes that precede the current node and will have to run the minimum amount of tests per node - var numPrecedingMinLoadNodes int - if parallelNode <= numMaxLoadNodes { - numPrecedingMinLoadNodes = 0 - } else { - numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 - } - - // Evaluate the test start index and number of tests to run - startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode - if parallelNode > numMaxLoadNodes { - count = minTestsPerNode - } else { - count = maxTestsPerNode - } - return -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go deleted file mode 100644 index 99f548bc..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go +++ /dev/null @@ -1,59 +0,0 @@ -package spec_iterator - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/onsi/ginkgo/internal/spec" -) - -type ParallelIterator struct { - specs []*spec.Spec - host string - client *http.Client -} - -func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator { - return &ParallelIterator{ - specs: specs, - host: host, - client: &http.Client{}, - } -} - -func (s *ParallelIterator) Next() (*spec.Spec, error) { - resp, err := s.client.Get(s.host + "/counter") - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode) - } - - var counter Counter - err = json.NewDecoder(resp.Body).Decode(&counter) - if err != nil { - return nil, err - } - - if counter.Index >= len(s.specs) { - return nil, ErrClosed - } - - return s.specs[counter.Index], nil -} - -func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int { - return len(s.specs) -} - -func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { - return -1, false -} - -func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { - return -1, false -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go deleted file mode 100644 index a51c93b8..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go +++ /dev/null @@ -1,45 +0,0 @@ -package spec_iterator - -import ( - "github.com/onsi/ginkgo/internal/spec" -) - -type SerialIterator struct { - specs []*spec.Spec - index int -} - -func 
NewSerialIterator(specs []*spec.Spec) *SerialIterator { - return &SerialIterator{ - specs: specs, - index: 0, - } -} - -func (s *SerialIterator) Next() (*spec.Spec, error) { - if s.index >= len(s.specs) { - return nil, ErrClosed - } - - spec := s.specs[s.index] - s.index += 1 - return spec, nil -} - -func (s *SerialIterator) NumberOfSpecsPriorToIteration() int { - return len(s.specs) -} - -func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { - return len(s.specs), true -} - -func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { - count := 0 - for _, s := range s.specs { - if !s.Skipped() && !s.Pending() { - count += 1 - } - } - return count, true -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go deleted file mode 100644 index ad4a3ea3..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go +++ /dev/null @@ -1,47 +0,0 @@ -package spec_iterator - -import "github.com/onsi/ginkgo/internal/spec" - -type ShardedParallelIterator struct { - specs []*spec.Spec - index int - maxIndex int -} - -func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator { - startIndex, count := ParallelizedIndexRange(len(specs), total, node) - - return &ShardedParallelIterator{ - specs: specs, - index: startIndex, - maxIndex: startIndex + count, - } -} - -func (s *ShardedParallelIterator) Next() (*spec.Spec, error) { - if s.index >= s.maxIndex { - return nil, ErrClosed - } - - spec := s.specs[s.index] - s.index += 1 - return spec, nil -} - -func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int { - return len(s.specs) -} - -func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { - return s.maxIndex - s.index, true -} - -func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { - count := 0 - for i := s.index; i < s.maxIndex; i += 1 { - if !s.specs[i].Skipped() && !s.specs[i].Pending() { - count += 1 - } - } - return count, true -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go deleted file mode 100644 index 74bffad6..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go +++ /dev/null @@ -1,20 +0,0 @@ -package spec_iterator - -import ( - "errors" - - "github.com/onsi/ginkgo/internal/spec" -) - -var ErrClosed = errors.New("no more specs to run") - -type SpecIterator interface { - Next() (*spec.Spec, error) - NumberOfSpecsPriorToIteration() int - NumberOfSpecsToProcessIfKnown() (int, bool) - NumberOfSpecsThatWillBeRunIfKnown() (int, bool) -} - -type Counter struct { - Index int `json:"index"` -} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go deleted file mode 100644 index a0b8b62d..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go +++ /dev/null @@ -1,15 +0,0 @@ -package specrunner - -import ( - "crypto/rand" - "fmt" -) - -func randomID() string { - b := make([]byte, 8) - _, err := rand.Read(b) - if err != nil { - return "" - } - return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go 
deleted file mode 100644 index c9a0a60d..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go +++ /dev/null @@ -1,411 +0,0 @@ -package specrunner - -import ( - "fmt" - "os" - "os/signal" - "sync" - "syscall" - - "github.com/onsi/ginkgo/internal/spec_iterator" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/internal/spec" - Writer "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" - - "time" -) - -type SpecRunner struct { - description string - beforeSuiteNode leafnodes.SuiteNode - iterator spec_iterator.SpecIterator - afterSuiteNode leafnodes.SuiteNode - reporters []reporters.Reporter - startTime time.Time - suiteID string - runningSpec *spec.Spec - writer Writer.WriterInterface - config config.GinkgoConfigType - interrupted bool - processedSpecs []*spec.Spec - lock *sync.Mutex -} - -func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { - return &SpecRunner{ - description: description, - beforeSuiteNode: beforeSuiteNode, - iterator: iterator, - afterSuiteNode: afterSuiteNode, - reporters: reporters, - writer: writer, - config: config, - suiteID: randomID(), - lock: &sync.Mutex{}, - } -} - -func (runner *SpecRunner) Run() bool { - if runner.config.DryRun { - runner.performDryRun() - return true - } - - runner.reportSuiteWillBegin() - signalRegistered := make(chan struct{}) - go runner.registerForInterrupts(signalRegistered) - <-signalRegistered - - suitePassed := runner.runBeforeSuite() - - if suitePassed { - suitePassed = runner.runSpecs() - } - - runner.blockForeverIfInterrupted() - - suitePassed = runner.runAfterSuite() && suitePassed - - runner.reportSuiteDidEnd(suitePassed) - - return suitePassed -} - -func (runner *SpecRunner) performDryRun() { - runner.reportSuiteWillBegin() - - if runner.beforeSuiteNode != nil { - summary := runner.beforeSuiteNode.Summary() - summary.State = types.SpecStatePassed - runner.reportBeforeSuite(summary) - } - - for { - spec, err := runner.iterator.Next() - if err == spec_iterator.ErrClosed { - break - } - if err != nil { - fmt.Println("failed to iterate over tests:\n" + err.Error()) - break - } - - runner.processedSpecs = append(runner.processedSpecs, spec) - - summary := spec.Summary(runner.suiteID) - runner.reportSpecWillRun(summary) - if summary.State == types.SpecStateInvalid { - summary.State = types.SpecStatePassed - } - runner.reportSpecDidComplete(summary, false) - } - - if runner.afterSuiteNode != nil { - summary := runner.afterSuiteNode.Summary() - summary.State = types.SpecStatePassed - runner.reportAfterSuite(summary) - } - - runner.reportSuiteDidEnd(true) -} - -func (runner *SpecRunner) runBeforeSuite() bool { - if runner.beforeSuiteNode == nil || runner.wasInterrupted() { - return true - } - - runner.writer.Truncate() - conf := runner.config - passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) - if !passed { - runner.writer.DumpOut() - } - runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) - return passed -} - -func (runner *SpecRunner) runAfterSuite() bool { - if runner.afterSuiteNode == nil { - return true - } - - runner.writer.Truncate() - conf := runner.config - passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) - if 
!passed { - runner.writer.DumpOut() - } - runner.reportAfterSuite(runner.afterSuiteNode.Summary()) - return passed -} - -func (runner *SpecRunner) runSpecs() bool { - suiteFailed := false - skipRemainingSpecs := false - for { - spec, err := runner.iterator.Next() - if err == spec_iterator.ErrClosed { - break - } - if err != nil { - fmt.Println("failed to iterate over tests:\n" + err.Error()) - suiteFailed = true - break - } - - runner.processedSpecs = append(runner.processedSpecs, spec) - - if runner.wasInterrupted() { - break - } - if skipRemainingSpecs { - spec.Skip() - } - - if !spec.Skipped() && !spec.Pending() { - if passed := runner.runSpec(spec); !passed { - suiteFailed = true - } - } else if spec.Pending() && runner.config.FailOnPending { - runner.reportSpecWillRun(spec.Summary(runner.suiteID)) - suiteFailed = true - runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) - } else { - runner.reportSpecWillRun(spec.Summary(runner.suiteID)) - runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) - } - - if spec.Failed() && runner.config.FailFast { - skipRemainingSpecs = true - } - } - - return !suiteFailed -} - -func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) { - maxAttempts := 1 - if runner.config.FlakeAttempts > 0 { - // uninitialized configs count as 1 - maxAttempts = runner.config.FlakeAttempts - } - - for i := 0; i < maxAttempts; i++ { - runner.reportSpecWillRun(spec.Summary(runner.suiteID)) - runner.runningSpec = spec - spec.Run(runner.writer) - runner.runningSpec = nil - runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) - if !spec.Failed() { - return true - } - } - return false -} - -func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { - if runner.runningSpec == nil { - return nil, false - } - - return runner.runningSpec.Summary(runner.suiteID), true -} - -func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - close(signalRegistered) - - <-c - signal.Stop(c) - runner.markInterrupted() - go runner.registerForHardInterrupts() - runner.writer.DumpOutWithHeader(` -Received interrupt. Emitting contents of GinkgoWriter... ---------------------------------------------------------- -`) - if runner.afterSuiteNode != nil { - fmt.Fprint(os.Stderr, ` ---------------------------------------------------------- -Received interrupt. Running AfterSuite... -^C again to terminate immediately -`) - runner.runAfterSuite() - } - runner.reportSuiteDidEnd(false) - os.Exit(1) -} - -func (runner *SpecRunner) registerForHardInterrupts() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - - <-c - fmt.Fprintln(os.Stderr, "\nReceived second interrupt. 
Shutting down.") - os.Exit(1) -} - -func (runner *SpecRunner) blockForeverIfInterrupted() { - runner.lock.Lock() - interrupted := runner.interrupted - runner.lock.Unlock() - - if interrupted { - select {} - } -} - -func (runner *SpecRunner) markInterrupted() { - runner.lock.Lock() - defer runner.lock.Unlock() - runner.interrupted = true -} - -func (runner *SpecRunner) wasInterrupted() bool { - runner.lock.Lock() - defer runner.lock.Unlock() - return runner.interrupted -} - -func (runner *SpecRunner) reportSuiteWillBegin() { - runner.startTime = time.Now() - summary := runner.suiteWillBeginSummary() - for _, reporter := range runner.reporters { - reporter.SpecSuiteWillBegin(runner.config, summary) - } -} - -func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { - for _, reporter := range runner.reporters { - reporter.BeforeSuiteDidRun(summary) - } -} - -func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { - for _, reporter := range runner.reporters { - reporter.AfterSuiteDidRun(summary) - } -} - -func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { - runner.writer.Truncate() - - for _, reporter := range runner.reporters { - reporter.SpecWillRun(summary) - } -} - -func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { - if len(summary.CapturedOutput) == 0 { - summary.CapturedOutput = string(runner.writer.Bytes()) - } - for i := len(runner.reporters) - 1; i >= 1; i-- { - runner.reporters[i].SpecDidComplete(summary) - } - - if failed { - runner.writer.DumpOut() - } - - runner.reporters[0].SpecDidComplete(summary) -} - -func (runner *SpecRunner) reportSuiteDidEnd(success bool) { - summary := runner.suiteDidEndSummary(success) - summary.RunTime = time.Since(runner.startTime) - for _, reporter := range runner.reporters { - reporter.SpecSuiteDidEnd(summary) - } -} - -func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) { - count = 0 - - for _, spec := range runner.processedSpecs { - if filter(spec) { - count++ - } - } - - return count -} - -func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary { - numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return !ex.Skipped() && !ex.Pending() - }) - - numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Pending() - }) - - numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Skipped() - }) - - numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Passed() - }) - - numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Flaked() - }) - - numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { - return ex.Failed() - }) - - if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { - var known bool - numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() - if !known { - numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration() - } - numberOfFailedSpecs = numberOfSpecsThatWillBeRun - } - - return &types.SuiteSummary{ - SuiteDescription: runner.description, - SuiteSucceeded: success, - SuiteID: runner.suiteID, - - NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), - NumberOfTotalSpecs: len(runner.processedSpecs), - 
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, - NumberOfPendingSpecs: numberOfPendingSpecs, - NumberOfSkippedSpecs: numberOfSkippedSpecs, - NumberOfPassedSpecs: numberOfPassedSpecs, - NumberOfFailedSpecs: numberOfFailedSpecs, - NumberOfFlakedSpecs: numberOfFlakedSpecs, - } -} - -func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary { - numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown() - if !known { - numTotal = -1 - } - - numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() - if !known { - numToRun = -1 - } - - return &types.SuiteSummary{ - SuiteDescription: runner.description, - SuiteID: runner.suiteID, - - NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), - NumberOfTotalSpecs: numTotal, - NumberOfSpecsThatWillBeRun: numToRun, - NumberOfPendingSpecs: -1, - NumberOfSkippedSpecs: -1, - NumberOfPassedSpecs: -1, - NumberOfFailedSpecs: -1, - NumberOfFlakedSpecs: -1, - } -} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go deleted file mode 100644 index b4a83c43..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ /dev/null @@ -1,227 +0,0 @@ -package suite - -import ( - "math/rand" - "net/http" - "time" - - "github.com/onsi/ginkgo/internal/spec_iterator" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/internal/containernode" - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/internal/spec" - "github.com/onsi/ginkgo/internal/specrunner" - "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" -) - -type ginkgoTestingT interface { - Fail() -} - -type deferredContainerNode struct { - text string - body func() - flag types.FlagType - codeLocation types.CodeLocation -} - -type Suite struct { - topLevelContainer *containernode.ContainerNode - currentContainer *containernode.ContainerNode - - deferredContainerNodes []deferredContainerNode - - containerIndex int - beforeSuiteNode leafnodes.SuiteNode - afterSuiteNode leafnodes.SuiteNode - runner *specrunner.SpecRunner - failer *failer.Failer - running bool - expandTopLevelNodes bool -} - -func New(failer *failer.Failer) *Suite { - topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) - - return &Suite{ - topLevelContainer: topLevelContainer, - currentContainer: topLevelContainer, - failer: failer, - containerIndex: 1, - deferredContainerNodes: []deferredContainerNode{}, - } -} - -func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { - if config.ParallelTotal < 1 { - panic("ginkgo.parallel.total must be >= 1") - } - - if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { - panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") - } - - suite.expandTopLevelNodes = true - for _, deferredNode := range suite.deferredContainerNodes { - suite.PushContainerNode(deferredNode.text, deferredNode.body, deferredNode.flag, deferredNode.codeLocation) - } - - r := rand.New(rand.NewSource(config.RandomSeed)) - suite.topLevelContainer.Shuffle(r) - iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) - suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, 
config) - - suite.running = true - success := suite.runner.Run() - if !success { - t.Fail() - } - return success, hasProgrammaticFocus -} - -func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) { - specsSlice := []*spec.Spec{} - suite.topLevelContainer.BackPropagateProgrammaticFocus() - for _, collatedNodes := range suite.topLevelContainer.Collate() { - specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) - } - - specs := spec.NewSpecs(specsSlice) - specs.RegexScansFilePath = config.RegexScansFilePath - - if config.RandomizeAllSpecs { - specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) - } - - specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings) - - if config.SkipMeasurements { - specs.SkipMeasurements() - } - - var iterator spec_iterator.SpecIterator - - if config.ParallelTotal > 1 { - iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost) - resp, err := http.Get(config.SyncHost + "/has-counter") - if err != nil || resp.StatusCode != http.StatusOK { - iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode) - } - } else { - iterator = spec_iterator.NewSerialIterator(specs.Specs()) - } - - return iterator, specs.HasProgrammaticFocus() -} - -func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { - if !suite.running { - return nil, false - } - return suite.runner.CurrentSpecSummary() -} - -func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.beforeSuiteNode != nil { - panic("You may only call BeforeSuite once!") - } - suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.afterSuiteNode != nil { - panic("You may only call AfterSuite once!") - } - suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.beforeSuiteNode != nil { - panic("You may only call BeforeSuite once!") - } - suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.afterSuiteNode != nil { - panic("You may only call AfterSuite once!") - } - suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { - /* - We defer walking the container nodes (which immediately evaluates the `body` function) - until `RunSpecs` is called. We do this by storing off the deferred container nodes. Then, when - `RunSpecs` is called we actually go through and add the container nodes to the test structure. - - This allows us to defer calling all the `body` functions until _after_ the top level functions - have been walked, _after_ func init()s have been called, and _after_ `go test` has called `flag.Parse()`. 
- - This allows users to load up configuration information in the `TestX` go test hook just before `RunSpecs` - is invoked and solves issues like #693 and makes the lifecycle easier to reason about. - - */ - if !suite.expandTopLevelNodes { - suite.deferredContainerNodes = append(suite.deferredContainerNodes, deferredContainerNode{text, body, flag, codeLocation}) - return - } - - container := containernode.New(text, flag, codeLocation) - suite.currentContainer.PushContainerNode(container) - - previousContainer := suite.currentContainer - suite.currentContainer = container - suite.containerIndex++ - - body() - - suite.containerIndex-- - suite.currentContainer = previousContainer -} - -func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { - if suite.running { - suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go deleted file mode 100644 index 6739c3f6..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go +++ /dev/null @@ -1,36 +0,0 @@ -package writer - -type FakeGinkgoWriter struct { - EventStream []string -} - -func NewFake() *FakeGinkgoWriter { - return &FakeGinkgoWriter{ - EventStream: []string{}, - } -} - -func (writer 
*FakeGinkgoWriter) AddEvent(event string) { - writer.EventStream = append(writer.EventStream, event) -} - -func (writer *FakeGinkgoWriter) Truncate() { - writer.EventStream = append(writer.EventStream, "TRUNCATE") -} - -func (writer *FakeGinkgoWriter) DumpOut() { - writer.EventStream = append(writer.EventStream, "DUMP") -} - -func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) { - writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header) -} - -func (writer *FakeGinkgoWriter) Bytes() []byte { - writer.EventStream = append(writer.EventStream, "BYTES") - return nil -} - -func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) { - return 0, nil -} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go deleted file mode 100644 index 98eca3bd..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go +++ /dev/null @@ -1,89 +0,0 @@ -package writer - -import ( - "bytes" - "io" - "sync" -) - -type WriterInterface interface { - io.Writer - - Truncate() - DumpOut() - DumpOutWithHeader(header string) - Bytes() []byte -} - -type Writer struct { - buffer *bytes.Buffer - outWriter io.Writer - lock *sync.Mutex - stream bool - redirector io.Writer -} - -func New(outWriter io.Writer) *Writer { - return &Writer{ - buffer: &bytes.Buffer{}, - lock: &sync.Mutex{}, - outWriter: outWriter, - stream: true, - } -} - -func (w *Writer) AndRedirectTo(writer io.Writer) { - w.redirector = writer -} - -func (w *Writer) SetStream(stream bool) { - w.lock.Lock() - defer w.lock.Unlock() - w.stream = stream -} - -func (w *Writer) Write(b []byte) (n int, err error) { - w.lock.Lock() - defer w.lock.Unlock() - - n, err = w.buffer.Write(b) - if w.redirector != nil { - w.redirector.Write(b) - } - if w.stream { - return w.outWriter.Write(b) - } - return n, err -} - -func (w *Writer) Truncate() { - w.lock.Lock() - defer w.lock.Unlock() - w.buffer.Reset() -} - -func (w *Writer) DumpOut() { - w.lock.Lock() - defer w.lock.Unlock() - if !w.stream { - w.buffer.WriteTo(w.outWriter) - } -} - -func (w *Writer) Bytes() []byte { - w.lock.Lock() - defer w.lock.Unlock() - b := w.buffer.Bytes() - copied := make([]byte, len(b)) - copy(copied, b) - return copied -} - -func (w *Writer) DumpOutWithHeader(header string) { - w.lock.Lock() - defer w.lock.Unlock() - if !w.stream && w.buffer.Len() > 0 { - w.outWriter.Write([]byte(header)) - w.buffer.WriteTo(w.outWriter) - } -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go deleted file mode 100644 index f0c9f614..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Ginkgo's Default Reporter - -A number of command line flags are available to tweak Ginkgo's default output. 
- -These are documented [here](http://onsi.github.io/ginkgo/#running_tests) -*/ -package reporters - -import ( - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters/stenographer" - "github.com/onsi/ginkgo/types" -) - -type DefaultReporter struct { - config config.DefaultReporterConfigType - stenographer stenographer.Stenographer - specSummaries []*types.SpecSummary -} - -func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { - return &DefaultReporter{ - config: config, - stenographer: stenographer, - } -} - -func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) - if config.ParallelTotal > 1 { - reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct) - } else { - reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) - } -} - -func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) - } -} - -func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) - } -} - -func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { - if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { - reporter.stenographer.AnnounceSpecWillRun(specSummary) - } -} - -func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { - switch specSummary.State { - case types.SpecStatePassed: - if specSummary.IsMeasurement { - reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct) - } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { - reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct) - } else { - reporter.stenographer.AnnounceSuccessfulSpec(specSummary) - if reporter.config.ReportPassed { - reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) - } - } - case types.SpecStatePending: - reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) - case types.SpecStateSkipped: - reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace) - case types.SpecStateTimedOut: - reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - case types.SpecStatePanicked: - reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - case types.SpecStateFailed: - reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - } - - reporter.specSummaries = append(reporter.specSummaries, specSummary) -} - -func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - 
reporter.stenographer.SummarizeFailures(reporter.specSummaries) - reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go deleted file mode 100644 index 27db4794..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go +++ /dev/null @@ -1,59 +0,0 @@ -package reporters - -import ( - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -//FakeReporter is useful for testing purposes -type FakeReporter struct { - Config config.GinkgoConfigType - - BeginSummary *types.SuiteSummary - BeforeSuiteSummary *types.SetupSummary - SpecWillRunSummaries []*types.SpecSummary - SpecSummaries []*types.SpecSummary - AfterSuiteSummary *types.SetupSummary - EndSummary *types.SuiteSummary - - SpecWillRunStub func(specSummary *types.SpecSummary) - SpecDidCompleteStub func(specSummary *types.SpecSummary) -} - -func NewFakeReporter() *FakeReporter { - return &FakeReporter{ - SpecWillRunSummaries: make([]*types.SpecSummary, 0), - SpecSummaries: make([]*types.SpecSummary, 0), - } -} - -func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - fakeR.Config = config - fakeR.BeginSummary = summary -} - -func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - fakeR.BeforeSuiteSummary = setupSummary -} - -func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { - if fakeR.SpecWillRunStub != nil { - fakeR.SpecWillRunStub(specSummary) - } - fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) -} - -func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { - if fakeR.SpecDidCompleteStub != nil { - fakeR.SpecDidCompleteStub(specSummary) - } - fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) -} - -func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - fakeR.AfterSuiteSummary = setupSummary -} - -func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - fakeR.EndSummary = summary -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go deleted file mode 100644 index 01ddca6e..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - -JUnit XML Reporter for Ginkgo - -For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output - -*/ - -package reporters - -import ( - "encoding/xml" - "fmt" - "math" - "os" - "path/filepath" - "strings" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -type JUnitTestSuite struct { - XMLName xml.Name `xml:"testsuite"` - TestCases []JUnitTestCase `xml:"testcase"` - Name string `xml:"name,attr"` - Tests int `xml:"tests,attr"` - Failures int `xml:"failures,attr"` - Errors int `xml:"errors,attr"` - Time float64 `xml:"time,attr"` -} - -type JUnitTestCase struct { - Name string `xml:"name,attr"` - ClassName string `xml:"classname,attr"` - FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` - Skipped *JUnitSkipped `xml:"skipped,omitempty"` - Time float64 `xml:"time,attr"` - SystemOut string `xml:"system-out,omitempty"` -} - -type JUnitFailureMessage struct { - Type string `xml:"type,attr"` - Message string `xml:",chardata"` -} - -type JUnitSkipped struct { - Message string `xml:",chardata"` -} - -type JUnitReporter struct { - suite 
JUnitTestSuite - filename string - testSuiteName string - ReporterConfig config.DefaultReporterConfigType -} - -//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename. -func NewJUnitReporter(filename string) *JUnitReporter { - return &JUnitReporter{ - filename: filename, - } -} - -func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.suite = JUnitTestSuite{ - Name: summary.SuiteDescription, - TestCases: []JUnitTestCase{}, - } - reporter.testSuiteName = summary.SuiteDescription - reporter.ReporterConfig = config.DefaultReporterConfig -} - -func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { -} - -func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("BeforeSuite", setupSummary) -} - -func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("AfterSuite", setupSummary) -} - -func failureMessage(failure types.SpecFailure) string { - return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String()) -} - -func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - testCase := JUnitTestCase{ - Name: name, - ClassName: reporter.testSuiteName, - } - - testCase.FailureMessage = &JUnitFailureMessage{ - Type: reporter.failureTypeForState(setupSummary.State), - Message: failureMessage(setupSummary.Failure), - } - testCase.SystemOut = setupSummary.CapturedOutput - testCase.Time = setupSummary.RunTime.Seconds() - reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) - } -} - -func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { - testCase := JUnitTestCase{ - Name: strings.Join(specSummary.ComponentTexts[1:], " "), - ClassName: reporter.testSuiteName, - } - if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { - testCase.SystemOut = specSummary.CapturedOutput - } - if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { - testCase.FailureMessage = &JUnitFailureMessage{ - Type: reporter.failureTypeForState(specSummary.State), - Message: failureMessage(specSummary.Failure), - } - if specSummary.State == types.SpecStatePanicked { - testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s", - specSummary.Failure.ForwardedPanic, - specSummary.Failure.Location.FullStackTrace) - } - testCase.SystemOut = specSummary.CapturedOutput - } - if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { - testCase.Skipped = &JUnitSkipped{} - if specSummary.Failure.Message != "" { - testCase.Skipped.Message = failureMessage(specSummary.Failure) - } - } - testCase.Time = specSummary.RunTime.Seconds() - reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) -} - -func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun - reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 - reporter.suite.Failures = summary.NumberOfFailedSpecs - reporter.suite.Errors = 0 - if reporter.ReporterConfig.ReportFile != "" { - reporter.filename = reporter.ReporterConfig.ReportFile - fmt.Printf("\nJUnit path was 
configured: %s\n", reporter.filename) - } - filePath, _ := filepath.Abs(reporter.filename) - dirPath := filepath.Dir(filePath) - err := os.MkdirAll(dirPath, os.ModePerm) - if err != nil { - fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error()) - } - file, err := os.Create(filePath) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error()) - } - defer file.Close() - file.WriteString(xml.Header) - encoder := xml.NewEncoder(file) - encoder.Indent(" ", " ") - err = encoder.Encode(reporter.suite) - if err == nil { - fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath) - } else { - fmt.Fprintf(os.Stderr,"\nFailed to generate JUnit report data:\n\t%s", err.Error()) - } -} - -func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { - switch state { - case types.SpecStateFailed: - return "Failure" - case types.SpecStateTimedOut: - return "Timeout" - case types.SpecStatePanicked: - return "Panic" - default: - return "" - } -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go deleted file mode 100644 index 348b9dfc..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/reporter.go +++ /dev/null @@ -1,15 +0,0 @@ -package reporters - -import ( - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -type Reporter interface { - SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) - BeforeSuiteDidRun(setupSummary *types.SetupSummary) - SpecWillRun(specSummary *types.SpecSummary) - SpecDidComplete(specSummary *types.SpecSummary) - AfterSuiteDidRun(setupSummary *types.SetupSummary) - SpecSuiteDidEnd(summary *types.SuiteSummary) -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go deleted file mode 100644 index 45b8f886..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go +++ /dev/null @@ -1,64 +0,0 @@ -package stenographer - -import ( - "fmt" - "strings" -) - -func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { - var out string - - if len(args) > 0 { - out = fmt.Sprintf(format, args...) - } else { - out = format - } - - if s.color { - return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) - } else { - return out - } -} - -func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { - fmt.Fprintln(s.w, text) - fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text))) -} - -func (s *consoleStenographer) printNewLine() { - fmt.Fprintln(s.w, "") -} - -func (s *consoleStenographer) printDelimiter() { - fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30))) -} - -func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { - fmt.Fprint(s.w, s.indent(indentation, format, args...)) -} - -func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { - fmt.Fprintln(s.w, s.indent(indentation, format, args...)) -} - -func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { - var text string - - if len(args) > 0 { - text = fmt.Sprintf(format, args...) 
- } else { - text = format - } - - stringArray := strings.Split(text, "\n") - padding := "" - if indentation >= 0 { - padding = strings.Repeat(" ", indentation) - } - for i, s := range stringArray { - stringArray[i] = fmt.Sprintf("%s%s", padding, s) - } - - return strings.Join(stringArray, "\n") -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go deleted file mode 100644 index 1aa5b9db..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go +++ /dev/null @@ -1,142 +0,0 @@ -package stenographer - -import ( - "sync" - - "github.com/onsi/ginkgo/types" -) - -func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { - return FakeStenographerCall{ - Method: method, - Args: args, - } -} - -type FakeStenographer struct { - calls []FakeStenographerCall - lock *sync.Mutex -} - -type FakeStenographerCall struct { - Method string - Args []interface{} -} - -func NewFakeStenographer() *FakeStenographer { - stenographer := &FakeStenographer{ - lock: &sync.Mutex{}, - } - stenographer.Reset() - return stenographer -} - -func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - return stenographer.calls -} - -func (stenographer *FakeStenographer) Reset() { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - stenographer.calls = make([]FakeStenographerCall, 0) -} - -func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - results := make([]FakeStenographerCall, 0) - for _, call := range stenographer.calls { - if call.Method == method { - results = append(results, call) - } - } - - return results -} - -func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) -} - -func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { - stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) -} - -func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { - stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) -} - -func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { - stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct) -} - -func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { - stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) -} - -func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { - stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { - stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { - stenographer.registerCall("AnnounceSpecWillRun", spec) -} - -func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - 
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) -} -func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { - stenographer.registerCall("AnnounceCapturedOutput", output) -} - -func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) { - stenographer.registerCall("AnnounceSuccessfulSpec", spec) -} - -func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct) -} - -func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { - stenographer.registerCall("AnnouncePendingSpec", spec, noisy) -} - -func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { - stenographer.registerCall("SummarizeFailures", summaries) -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go deleted file mode 100644 index 638d6fbb..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go +++ /dev/null @@ -1,572 +0,0 @@ -/* -The stenographer is used by Ginkgo's reporters to generate output. - -Move along, nothing to see here. 
-*/ - -package stenographer - -import ( - "fmt" - "io" - "runtime" - "strings" - - "github.com/onsi/ginkgo/types" -) - -const defaultStyle = "\x1b[0m" -const boldStyle = "\x1b[1m" -const redColor = "\x1b[91m" -const greenColor = "\x1b[32m" -const yellowColor = "\x1b[33m" -const cyanColor = "\x1b[36m" -const grayColor = "\x1b[90m" -const lightGrayColor = "\x1b[37m" - -type cursorStateType int - -const ( - cursorStateTop cursorStateType = iota - cursorStateStreaming - cursorStateMidBlock - cursorStateEndBlock -) - -type Stenographer interface { - AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) - AnnounceAggregatedParallelRun(nodes int, succinct bool) - AnnounceParallelRun(node int, nodes int, succinct bool) - AnnounceTotalNumberOfSpecs(total int, succinct bool) - AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) - AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) - - AnnounceSpecWillRun(spec *types.SpecSummary) - AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) - AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) - - AnnounceCapturedOutput(output string) - - AnnounceSuccessfulSpec(spec *types.SpecSummary) - AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) - AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) - - AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) - AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) - - AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) - AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) - AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) - - SummarizeFailures(summaries []*types.SpecSummary) -} - -func New(color bool, enableFlakes bool, writer io.Writer) Stenographer { - denoter := "•" - if runtime.GOOS == "windows" { - denoter = "+" - } - return &consoleStenographer{ - color: color, - denoter: denoter, - cursorState: cursorStateTop, - enableFlakes: enableFlakes, - w: writer, - } -} - -type consoleStenographer struct { - color bool - denoter string - cursorState cursorStateType - enableFlakes bool - w io.Writer -} - -var alternatingColors = []string{defaultStyle, grayColor} - -func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { - if succinct { - s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) - return - } - s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") - s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) - if randomizingAll { - s.print(0, " - Will randomize all specs") - } - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { - if succinct { - s.print(0, "- node #%d ", node) - return - } - s.println(0, - "Parallel test node %s/%s.", - s.colorize(boldStyle, "%d", node), - s.colorize(boldStyle, "%d", nodes), - ) - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { - if succinct { - s.print(0, "- %d nodes ", nodes) - return - } - s.println(0, - "Running in parallel across %s nodes", - s.colorize(boldStyle, "%d", nodes), - ) - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { - if succinct { - s.print(0, "- %d/%d specs ", specsToRun, total) - s.stream() - return - 
} - s.println(0, - "Will run %s of %s specs", - s.colorize(boldStyle, "%d", specsToRun), - s.colorize(boldStyle, "%d", total), - ) - - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { - if succinct { - s.print(0, "- %d specs ", total) - s.stream() - return - } - s.println(0, - "Will run %s specs", - s.colorize(boldStyle, "%d", total), - ) - - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { - if succinct && summary.SuiteSucceeded { - s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) - return - } - s.printNewLine() - color := greenColor - if !summary.SuiteSucceeded { - color = redColor - } - s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) - - status := "" - if summary.SuiteSucceeded { - status = s.colorize(boldStyle+greenColor, "SUCCESS!") - } else { - status = s.colorize(boldStyle+redColor, "FAIL!") - } - - flakes := "" - if s.enableFlakes { - flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs) - } - - s.print(0, - "%s -- %s | %s | %s | %s\n", - status, - s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), - s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes, - s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), - s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), - ) -} - -func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { - s.startBlock() - for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { - s.print(0, s.colorize(alternatingColors[i%2], text)+" ") - } - - indentation := 0 - if len(spec.ComponentTexts) > 2 { - indentation = 1 - s.printNewLine() - } - index := len(spec.ComponentTexts) - 1 - s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) - s.printNewLine() - s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) - s.printNewLine() - s.midBlock() -} - -func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) -} - -func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.startBlock() - var message string - switch summary.State { - case types.SpecStateFailed: - message = "Failure" - case types.SpecStatePanicked: - message = "Panic" - case types.SpecStateTimedOut: - message = "Timeout" - } - - s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) - - s.printNewLine() - s.printFailure(indentation, summary.State, summary.Failure, fullTrace) - - s.endBlock() -} - -func (s *consoleStenographer) AnnounceCapturedOutput(output string) { - if output == "" { - return - } - - s.startBlock() - s.println(0, output) - s.midBlock() -} - -func (s *consoleStenographer) 
AnnounceSuccessfulSpec(spec *types.SpecSummary) { - s.print(0, s.colorize(greenColor, s.denoter)) - s.stream() -} - -func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) { - s.printBlockWithMessage( - s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), - "", - spec, - succinct, - ) -} - -func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) { - s.printBlockWithMessage( - s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), - s.measurementReport(spec, succinct), - spec, - succinct, - ) -} - -func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { - if noisy { - s.printBlockWithMessage( - s.colorize(yellowColor, "P [PENDING]"), - "", - spec, - false, - ) - } else { - s.print(0, s.colorize(yellowColor, "P")) - s.stream() - } -} - -func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { - // Skips at runtime will have a non-empty spec.Failure. All others should be succinct. - if succinct || spec.Failure == (types.SpecFailure{}) { - s.print(0, s.colorize(cyanColor, "S")) - s.stream() - } else { - s.startBlock() - s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) - - s.printNewLine() - s.printSkip(indentation, spec.Failure) - s.endBlock() - } -} - -func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) -} - -func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { - failingSpecs := []*types.SpecSummary{} - - for _, summary := range summaries { - if summary.HasFailureState() { - failingSpecs = append(failingSpecs, summary) - } - } - - if len(failingSpecs) == 0 { - return - } - - s.printNewLine() - s.printNewLine() - plural := "s" - if len(failingSpecs) == 1 { - plural = "" - } - s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) - for _, summary := range failingSpecs { - s.printNewLine() - if summary.HasFailureState() { - if summary.TimedOut() { - s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) - } else if summary.Panicked() { - s.print(0, s.colorize(redColor+boldStyle, "[Panic!] 
")) - } else if summary.Failed() { - s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) - } - s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) - s.printNewLine() - s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) - } - } -} - -func (s *consoleStenographer) startBlock() { - if s.cursorState == cursorStateStreaming { - s.printNewLine() - s.printDelimiter() - } else if s.cursorState == cursorStateMidBlock { - s.printNewLine() - } -} - -func (s *consoleStenographer) midBlock() { - s.cursorState = cursorStateMidBlock -} - -func (s *consoleStenographer) endBlock() { - s.printDelimiter() - s.cursorState = cursorStateEndBlock -} - -func (s *consoleStenographer) stream() { - s.cursorState = cursorStateStreaming -} - -func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { - s.startBlock() - s.println(0, header) - - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) - - if message != "" { - s.printNewLine() - s.println(indentation, message) - } - - s.endBlock() -} - -func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.startBlock() - s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) - - s.printNewLine() - s.printFailure(indentation, spec.State, spec.Failure, fullTrace) - s.endBlock() -} - -func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string { - switch failedComponentType { - case types.SpecComponentTypeBeforeSuite: - return " in Suite Setup (BeforeSuite)" - case types.SpecComponentTypeAfterSuite: - return " in Suite Teardown (AfterSuite)" - case types.SpecComponentTypeBeforeEach: - return " in Spec Setup (BeforeEach)" - case types.SpecComponentTypeJustBeforeEach: - return " in Spec Setup (JustBeforeEach)" - case types.SpecComponentTypeAfterEach: - return " in Spec Teardown (AfterEach)" - } - - return "" -} - -func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { - s.println(indentation, s.colorize(cyanColor, spec.Message)) - s.printNewLine() - s.println(indentation, spec.Location.String()) -} - -func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { - if state == types.SpecStatePanicked { - s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) - s.println(indentation, s.colorize(redColor, failure.ForwardedPanic)) - s.println(indentation, failure.Location.String()) - s.printNewLine() - s.println(indentation, s.colorize(redColor, "Full Stack Trace")) - s.println(indentation, failure.Location.FullStackTrace) - } else { - s.println(indentation, s.colorize(redColor, failure.Message)) - s.printNewLine() - s.println(indentation, failure.Location.String()) - if fullTrace { - s.printNewLine() - s.println(indentation, s.colorize(redColor, "Full Stack Trace")) - s.println(indentation, failure.Location.FullStackTrace) - } - } -} - -func (s *consoleStenographer) printSpecContext(componentTexts []string, 
componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { - startIndex := 1 - indentation := 0 - - if len(componentTexts) == 1 { - startIndex = 0 - } - - for i := startIndex; i < len(componentTexts); i++ { - if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { - color := redColor - if state == types.SpecStateSkipped { - color = cyanColor - } - blockType := "" - switch failedComponentType { - case types.SpecComponentTypeBeforeSuite: - blockType = "BeforeSuite" - case types.SpecComponentTypeAfterSuite: - blockType = "AfterSuite" - case types.SpecComponentTypeBeforeEach: - blockType = "BeforeEach" - case types.SpecComponentTypeJustBeforeEach: - blockType = "JustBeforeEach" - case types.SpecComponentTypeAfterEach: - blockType = "AfterEach" - case types.SpecComponentTypeIt: - blockType = "It" - case types.SpecComponentTypeMeasure: - blockType = "Measurement" - } - if succinct { - s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) - } else { - s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) - s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) - } - } else { - if succinct { - s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) - } else { - s.println(indentation, componentTexts[i]) - s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) - } - } - indentation++ - } - - return indentation -} - -func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { - indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) - - if succinct { - if len(componentTexts) > 0 { - s.printNewLine() - s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) - } - s.printNewLine() - indentation = 1 - } else { - indentation-- - } - - return indentation -} - -func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { - orderedKeys := make([]string, len(measurements)) - for key, measurement := range measurements { - orderedKeys[measurement.Order] = key - } - return orderedKeys -} - -func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { - if len(spec.Measurements) == 0 { - return "Found no measurements" - } - - message := []string{} - orderedKeys := s.orderedMeasurementKeys(spec.Measurements) - - if succinct { - message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) - for _, key := range orderedKeys { - measurement := spec.Measurements[key] - message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", - s.colorize(boldStyle, "%s", measurement.Name), - measurement.SmallestLabel, - s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), - measurement.Units, - measurement.AverageLabel, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), - measurement.Units, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), - measurement.Units, - measurement.LargestLabel, - s.colorize(redColor, measurement.PrecisionFmt(), 
measurement.Largest), - measurement.Units, - )) - } - } else { - message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) - for _, key := range orderedKeys { - measurement := spec.Measurements[key] - info := "" - if measurement.Info != nil { - message = append(message, fmt.Sprintf("%v", measurement.Info)) - } - - message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s", - s.colorize(boldStyle, "%s", measurement.Name), - info, - measurement.SmallestLabel, - s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), - measurement.Units, - measurement.LargestLabel, - s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest), - measurement.Units, - measurement.AverageLabel, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), - measurement.Units, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), - measurement.Units, - )) - } - } - - return strings.Join(message, "\n") -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md deleted file mode 100644 index e84226a7..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# go-colorable - -Colorable writer for windows. - -For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) -This package is possible to handle escape sequence for ansi color on windows. - -## Too Bad! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) - - -## So Good! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) - -## Usage - -```go -logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) -logrus.SetOutput(colorable.NewColorableStdout()) - -logrus.Info("succeeded") -logrus.Warn("not correct") -logrus.Error("something error") -logrus.Fatal("panic") -``` - -You can compile above code on non-windows OSs. 
- -## Installation - -``` -$ go get github.com/mattn/go-colorable -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go deleted file mode 100644 index 52d6653b..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -package colorable - -import ( - "io" - "os" -) - -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -func NewColorableStdout() io.Writer { - return os.Stdout -} - -func NewColorableStderr() io.Writer { - return os.Stderr -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go deleted file mode 100644 index fb976dbd..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go +++ /dev/null @@ -1,57 +0,0 @@ -package colorable - -import ( - "bytes" - "fmt" - "io" -) - -type NonColorable struct { - out io.Writer - lastbuf bytes.Buffer -} - -func NewNonColorable(w io.Writer) io.Writer { - return &NonColorable{out: w} -} - -func (w *NonColorable) Write(data []byte) (n int, err error) { - er := bytes.NewBuffer(data) -loop: - for { - c1, _, err := er.ReadRune() - if err != nil { - break loop - } - if c1 != 0x1b { - fmt.Fprint(w.out, string(c1)) - continue - } - c2, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - break loop - } - if c2 != 0x5b { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - continue - } - - var buf bytes.Buffer - for { - c, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - w.lastbuf.Write(buf.Bytes()) - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - break - } - buf.Write([]byte(string(c))) - } - } - return len(data) - w.lastbuf.Len(), nil -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE deleted file mode 100644 index 65dc692b..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md deleted file mode 100644 index 74845de4..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# go-isatty - -isatty for golang - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/mattn/go-isatty" - "os" -) - -func main() { - if isatty.IsTerminal(os.Stdout.Fd()) { - fmt.Println("Is Terminal") - } else { - fmt.Println("Is Not Terminal") - } -} -``` - -## Installation - -``` -$ go get github.com/mattn/go-isatty -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go deleted file mode 100644 index 17d4f90e..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package isatty implements interface to isatty -package isatty diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go deleted file mode 100644 index 83c58877..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build appengine - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on on appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go deleted file mode 100644 index 98ffe86a..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build darwin freebsd openbsd netbsd -// +build !appengine - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go deleted file mode 100644 index 9d24bac1..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux -// +build !appengine - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go deleted file mode 100644 index 1f0c6bf5..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build solaris -// +build !appengine - -package isatty - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c -func IsTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) - return err == nil -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go deleted file mode 100644 index 83c398b1..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build windows -// +build !appengine - -package isatty - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") -var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go deleted file mode 100644 index 84fd8aff..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - -TeamCity Reporter for Ginkgo - -Makes use of TeamCity's support for Service Messages -http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests -*/ - -package reporters - -import ( - "fmt" - "io" - "strings" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -const ( - messageId = "##teamcity" -) - -type TeamCityReporter struct { - writer io.Writer - testSuiteName string - ReporterConfig config.DefaultReporterConfigType -} - -func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { - return &TeamCityReporter{ - writer: writer, - } -} - -func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.testSuiteName = escape(summary.SuiteDescription) - fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName) -} - -func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("BeforeSuite", setupSummary) -} - -func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("AfterSuite", setupSummary) -} - -func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - testName := escape(name) - fmt.Fprintf(reporter.writer, 
"%s[testStarted name='%s']\n", messageId, testName) - message := reporter.failureMessage(setupSummary.Failure) - details := reporter.failureDetails(setupSummary.Failure) - fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details) - durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds) - } -} - -func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { - testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName) -} - -func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { - testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - - if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { - details := escape(specSummary.CapturedOutput) - fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details) - } - if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { - message := reporter.failureMessage(specSummary.Failure) - details := reporter.failureDetails(specSummary.Failure) - fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details) - } - if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { - fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName) - } - - durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds) -} - -func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName) -} - -func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string { - return escape(failure.ComponentCodeLocation.String()) -} - -func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string { - return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String())) -} - -func escape(output string) string { - output = strings.Replace(output, "|", "||", -1) - output = strings.Replace(output, "'", "|'", -1) - output = strings.Replace(output, "\n", "|n", -1) - output = strings.Replace(output, "\r", "|r", -1) - output = strings.Replace(output, "[", "|[", -1) - output = strings.Replace(output, "]", "|]", -1) - return output -} diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go deleted file mode 100644 index 935a89e1..00000000 --- a/vendor/github.com/onsi/ginkgo/types/code_location.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -import ( - "fmt" -) - -type CodeLocation struct { - FileName string - LineNumber int - FullStackTrace string -} - -func (codeLocation CodeLocation) String() string { - return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) -} diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go deleted file mode 100644 index fdd6ed5b..00000000 --- a/vendor/github.com/onsi/ginkgo/types/synchronization.go +++ /dev/null 
@@ -1,30 +0,0 @@ -package types - -import ( - "encoding/json" -) - -type RemoteBeforeSuiteState int - -const ( - RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota - - RemoteBeforeSuiteStatePending - RemoteBeforeSuiteStatePassed - RemoteBeforeSuiteStateFailed - RemoteBeforeSuiteStateDisappeared -) - -type RemoteBeforeSuiteData struct { - Data []byte - State RemoteBeforeSuiteState -} - -func (r RemoteBeforeSuiteData) ToJSON() []byte { - data, _ := json.Marshal(r) - return data -} - -type RemoteAfterSuiteData struct { - CanRun bool -} diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go deleted file mode 100644 index c143e02d..00000000 --- a/vendor/github.com/onsi/ginkgo/types/types.go +++ /dev/null @@ -1,174 +0,0 @@ -package types - -import ( - "strconv" - "time" -) - -const GINKGO_FOCUS_EXIT_CODE = 197 - -/* -SuiteSummary represents the a summary of the test suite and is passed to both -Reporter.SpecSuiteWillBegin -Reporter.SpecSuiteDidEnd - -this is unfortunate as these two methods should receive different objects. When running in parallel -each node does not deterministically know how many specs it will end up running. - -Unfortunately making such a change would break backward compatibility. - -Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields -with -1. -*/ -type SuiteSummary struct { - SuiteDescription string - SuiteSucceeded bool - SuiteID string - - NumberOfSpecsBeforeParallelization int - NumberOfTotalSpecs int - NumberOfSpecsThatWillBeRun int - NumberOfPendingSpecs int - NumberOfSkippedSpecs int - NumberOfPassedSpecs int - NumberOfFailedSpecs int - // Flaked specs are those that failed initially, but then passed on a - // subsequent try. - NumberOfFlakedSpecs int - RunTime time.Duration -} - -type SpecSummary struct { - ComponentTexts []string - ComponentCodeLocations []CodeLocation - - State SpecState - RunTime time.Duration - Failure SpecFailure - IsMeasurement bool - NumberOfSamples int - Measurements map[string]*SpecMeasurement - - CapturedOutput string - SuiteID string -} - -func (s SpecSummary) HasFailureState() bool { - return s.State.IsFailure() -} - -func (s SpecSummary) TimedOut() bool { - return s.State == SpecStateTimedOut -} - -func (s SpecSummary) Panicked() bool { - return s.State == SpecStatePanicked -} - -func (s SpecSummary) Failed() bool { - return s.State == SpecStateFailed -} - -func (s SpecSummary) Passed() bool { - return s.State == SpecStatePassed -} - -func (s SpecSummary) Skipped() bool { - return s.State == SpecStateSkipped -} - -func (s SpecSummary) Pending() bool { - return s.State == SpecStatePending -} - -type SetupSummary struct { - ComponentType SpecComponentType - CodeLocation CodeLocation - - State SpecState - RunTime time.Duration - Failure SpecFailure - - CapturedOutput string - SuiteID string -} - -type SpecFailure struct { - Message string - Location CodeLocation - ForwardedPanic string - - ComponentIndex int - ComponentType SpecComponentType - ComponentCodeLocation CodeLocation -} - -type SpecMeasurement struct { - Name string - Info interface{} - Order int - - Results []float64 - - Smallest float64 - Largest float64 - Average float64 - StdDeviation float64 - - SmallestLabel string - LargestLabel string - AverageLabel string - Units string - Precision int -} - -func (s SpecMeasurement) PrecisionFmt() string { - if s.Precision == 0 { - return "%f" - } - - str := strconv.Itoa(s.Precision) - - return "%." 
+ str + "f" -} - -type SpecState uint - -const ( - SpecStateInvalid SpecState = iota - - SpecStatePending - SpecStateSkipped - SpecStatePassed - SpecStateFailed - SpecStatePanicked - SpecStateTimedOut -) - -func (state SpecState) IsFailure() bool { - return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed -} - -type SpecComponentType uint - -const ( - SpecComponentTypeInvalid SpecComponentType = iota - - SpecComponentTypeContainer - SpecComponentTypeBeforeSuite - SpecComponentTypeAfterSuite - SpecComponentTypeBeforeEach - SpecComponentTypeJustBeforeEach - SpecComponentTypeJustAfterEach - SpecComponentTypeAfterEach - SpecComponentTypeIt - SpecComponentTypeMeasure -) - -type FlagType uint - -const ( - FlagTypeNone FlagType = iota - FlagTypeFocused - FlagTypePending -) diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore similarity index 79% rename from vendor/github.com/onsi/ginkgo/.gitignore rename to vendor/github.com/onsi/ginkgo/v2/.gitignore index b9f9659d..edf0231c 100644 --- a/vendor/github.com/onsi/ginkgo/.gitignore +++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore @@ -1,7 +1,7 @@ .DS_Store -TODO +TODO.md tmp/**/* *.coverprofile .vscode .idea/ -*.log +*.log \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md similarity index 99% rename from vendor/github.com/onsi/ginkgo/CHANGELOG.md rename to vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index a26bc530..3b306ab3 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,7 @@ +## 2.0.0 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) + ## 1.16.5 Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC. @@ -35,7 +39,6 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme - Add slim-sprig template functions to bootstrap/generate (#775) [9162b86] -### Fixes - Fix accidental reference to 1488 (#784) [9fb7fe4] ## 1.15.2 diff --git a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md similarity index 50% rename from vendor/github.com/onsi/ginkgo/CONTRIBUTING.md rename to vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 908b95c2..15079406 100644 --- a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,28 +6,8 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Update the documentation. Ginko uses `godoc` comments and documentation on the `gh-pages` branch. - If relevant, please submit a docs PR to that branch alongside your code PR. +- Make sure all the tests succeed via `ginkgo -r -p` +- Vet your changes via `go vet ./...` +- Update the documentation. Ginko uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! - -## Setup - -Fork the repo, then: - -``` -go get github.com/onsi/ginkgo -go get github.com/onsi/gomega/... 
-cd $GOPATH/src/github.com/onsi/ginkgo -git remote add fork git@github.com:/ginkgo.git - -ginkgo -r -p # ensure tests are green -go vet ./... # ensure linter is happy - -## Making the PR - - go to a new branch `git checkout -b my-feature` - - make your changes - - run tests and linter again (see above) - - `git push fork` - - open PR 🎉 +Thanks for supporting Ginkgo! \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE similarity index 100% rename from vendor/github.com/onsi/ginkgo/LICENSE rename to vendor/github.com/onsi/ginkgo/v2/LICENSE diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md new file mode 100644 index 00000000..b2425061 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -0,0 +1,119 @@ +![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png) + +[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/) + +--- + +# Ginkgo 2.0 is now Generally Available! + +You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)! + +--- + +Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly: + +```go +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + ... +) + +Describe("Checking books out of the library", Label("library"), func() { + var library *libraries.Library + var book *books.Book + var valjean *users.User + BeforeEach(func() { + library = libraries.NewClient() + book = &books.Book{ + Title: "Les Miserables", + Author: "Victor Hugo", + } + valjean = users.NewUser("Jean Valjean") + }) + + When("the library has the book in question", func() { + BeforeEach(func() { + Expect(library.Store(book)).To(Succeed()) + }) + + Context("and the book is available", func() { + It("lends it to the reader", func() { + Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) + Expect(valjean.Books()).To(ContainElement(book)) + Expect(library.UserWithBook(book)).To(Equal(valjean)) + }) + }) + + Context("but the book has already been checked out", func() { + var javert *users.User + BeforeEach(func() { + javert = users.NewUser("Javert") + Expect(javert.Checkout(library, "Les Miserables")).To(Succeed()) + }) + + It("tells the user", func() { + err := valjean.Checkout(library, "Les Miserables") + Expect(err).To(MatchError("Les Miserables is currently checked out")) + }) + + It("lets the user place a hold and get notified later", func() { + Expect(valjean.Hold(library, "Les Miserables")).To(Succeed()) + Expect(valjean.Holds()).To(ContainElement(book)) + + By("when Javert returns the book") + Expect(javert.Return(library, book)).To(Succeed()) + + By("it eventually informs Valjean") + notification := "Les Miserables is ready for pick up" + Eventually(valjean.Notifications).Should(ContainElement(notification)) + + Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) + Expect(valjean.Books()).To(ContainElement(book)) + Expect(valjean.Holds()).To(BeEmpty()) + }) + }) + }) + + When("the library does not have the book in question", func() { + It("tells the reader the book is unavailable", func() {
+ err := valjean.Checkout(library, "Les Miserables") + Expect(err).To(MatchError("Les Miserables is not in the library catalog")) + }) + }) +}) +``` + +Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite). + +If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). + +## Capabilities + +Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://olivinelabs.com/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing. + +With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs) + +At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as + +```bash +ginkgo -p +``` + +By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. + +As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). + +Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development. + +And that's just Ginkgo!
[Gomega](https://onsi.github.io/gomega/) brings a rich, mature, family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers). + +Happy Testing! + +## License + +Ginkgo is MIT-Licensed + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md similarity index 92% rename from vendor/github.com/onsi/ginkgo/RELEASING.md rename to vendor/github.com/onsi/ginkgo/v2/RELEASING.md index db3d234c..0c80f668 100644 --- a/vendor/github.com/onsi/ginkgo/RELEASING.md +++ b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md @@ -7,7 +7,7 @@ A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: - New Features (minor version) - Fixes (fix version) - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) -1. Update `VERSION` in `config/config.go` +1. Update `VERSION` in `types/version.go` 1. Commit, push, and release: ``` git commit -m "vM.m.p" diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go new file mode 100644 index 00000000..67b351a0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -0,0 +1,69 @@ +package config + +// GinkgoConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type GinkgoConfigType = DeprecatedGinkgoConfigType +type DeprecatedGinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusStrings []string + SkipStrings []string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +// DefaultReporterConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType +type DeprecatedDefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool + ReportPassed bool + ReportFile string +} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go new file mode 100644 index 00000000..df1bb3fc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -0,0 +1,668 @@ +/* +Ginkgo is a testing framework for Go designed to help you write expressive tests. +https://github.com/onsi/ginkgo +MIT-Licensed + +The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to +build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that. +You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter. + +Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega + +You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality +that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview +or by running 'ginkgo help'.
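+
+A typical workflow with the CLI looks like this (a minimal sketch; both commands belong to the ginkgo CLI installed above):
+
+	ginkgo bootstrap    # generate a bootstrap file for the current package
+	ginkgo -r -p        # run all suites recursively, in parallel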
+*/ +package ginkgo + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +const GINKGO_VERSION = types.VERSION + +var flagSet types.GinkgoFlagSet +var deprecationTracker = types.NewDeprecationTracker() +var suiteConfig = types.NewDefaultSuiteConfig() +var reporterConfig = types.NewDefaultReporterConfig() +var suiteDidRun = false +var outputInterceptor internal.OutputInterceptor +var client parallel_support.Client + +func init() { + var err error + flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig) + exitIfErr(err) + GinkgoWriter = internal.NewWriter(os.Stdout) +} + +func exitIfErr(err error) { + if err != nil { + if outputInterceptor != nil { + outputInterceptor.Shutdown() + } + if client != nil { + client.Close() + } + fmt.Fprintln(formatter.ColorableStdErr, err.Error()) + os.Exit(1) + } +} + +func exitIfErrors(errors []error) { + if len(errors) > 0 { + if outputInterceptor != nil { + outputInterceptor.Shutdown() + } + if client != nil { + client.Close() + } + for _, err := range errors { + fmt.Fprintln(formatter.ColorableStdErr, err.Error()) + } + os.Exit(1) + } +} + +//The interface implemented by GinkgoWriter +type GinkgoWriterInterface interface { + io.Writer + + Print(a ...interface{}) + Printf(format string, a ...interface{}) + Println(a ...interface{}) + + TeeTo(writer io.Writer) + ClearTeeWriters() +} + +/* +GinkgoWriter implements a GinkgoWriterInterface and io.Writer + +When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed +to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen +only if the current test fails. + +GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer). +Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers. You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters() + +You can learn more at https://onsi.github.io/ginkgo/#logging-output +*/ +var GinkgoWriter GinkgoWriterInterface + +//The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +/* +GinkgoConfiguration returns the configuration of the current suite. + +The first return value is the SuiteConfig which controls aspects of how the suite runs, +the second return value is the ReporterConfig which controls aspects of how Ginkgo's default +reporter emits output. + +Mutating the returned configurations has no effect. To reconfigure Ginkgo programatically you need +to pass in your mutated copies into RunSpecs(). + +You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite +*/ +func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) { + return suiteConfig, reporterConfig +} + +/* +GinkgoRandomSeed returns the seed used to randomize spec execution order. It is +useful for seeding your own pseudorandom number generators to ensure +consistent executions from run to run, where your tests contain variability (for +example, when selecting random spec data). 
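+
+A minimal sketch of seeding a local PRNG from the suite seed (assumes the spec file imports math/rand):
+
+	// reuse the suite's randomization seed so failures reproduce with the same data
+	rng := rand.New(rand.NewSource(GinkgoRandomSeed()))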
+ +You can learn more at https://onsi.github.io/ginkgo/#spec-randomization +*/ +func GinkgoRandomSeed() int64 { + return suiteConfig.RandomSeed +} + +/* +GinkgoParallelProcess returns the parallel process number for the current ginkgo process +The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared +resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs + +For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization +*/ +func GinkgoParallelProcess() int { + return suiteConfig.ParallelProcess +} + +/* +PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant +when running in parallel and output to stdout/stderr is being intercepted. You generally +don't need to call this function - however there are cases when Ginkgo's output interception +mechanisms can interfere with external processes launched by the test process. + +In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr +then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter. +If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process +then ResumeOutputInterception() after starting it. + +Note that PauseOutputInterception() does not cause stdout writes to print to the console - +this simply stops intercepting and storing stdout writes to an internal buffer. +*/ +func PauseOutputInterception() { + if outputInterceptor == nil { + return + } + outputInterceptor.PauseIntercepting() +} + +//ResumeOutputInterception() - see docs for PauseOutputInterception() +func ResumeOutputInterception() { + if outputInterceptor == nil { + return + } + outputInterceptor.ResumeIntercepting() +} + +/* +RunSpecs is the entry point for the Ginkgo spec runner. + +You must call this within a Golang testing TestX(t *testing.T) function. +If you bootstrapped your suite with "ginkgo bootstrap" this is already +done for you. + +Ginkgo is typically configured via command-line flags. This configuration +can be overriden, however, and passed into RunSpecs as optional arguments: + + func TestMySuite(t *testing.T) { + RegisterFailHandler(gomega.Fail) + // fetch the current config + suiteConfig, reporterConfig := GinkgoConfiguration() + // adjust it + suiteConfig.SkipStrings = []string{"NEVER-RUN"} + reporterConfig.FullTrace = true + // pass it in to RunSpecs + RunSpecs(t, "My Suite", suiteConfig, reporterConfig) + } + +Note that some configuration changes can lead to undefined behavior. For example, +you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible +for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization +for more on how specs are parallelized in Ginkgo. + +You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite. 
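+
+For example, a minimal sketch of applying a suite-level label (the label name "integration" is illustrative):
+
+	func TestMySuite(t *testing.T) {
+		RegisterFailHandler(gomega.Fail)
+		// every spec in this suite will carry the "integration" label
+		RunSpecs(t, "My Suite", Label("integration"))
+	}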
+*/ +func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { + if suiteDidRun { + exitIfErr(types.GinkgoErrors.RerunningSuite()) + } + suiteDidRun = true + + suiteLabels := Labels{} + configErrors := []error{} + for _, arg := range args { + switch arg := arg.(type) { + case types.SuiteConfig: + suiteConfig = arg + case types.ReporterConfig: + reporterConfig = arg + case Labels: + suiteLabels = append(suiteLabels, arg...) + default: + configErrors = append(configErrors, types.GinkgoErrors.UnkownTypePassedToRunSpecs(arg)) + } + } + exitIfErrors(configErrors) + + configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig) + if len(configErrors) > 0 { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n")) + for _, err := range configErrors { + fmt.Fprintf(formatter.ColorableStdErr, err.Error()) + } + os.Exit(1) + } + + var reporter reporters.Reporter + if suiteConfig.ParallelTotal == 1 { + reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut) + outputInterceptor = internal.NoopOutputInterceptor{} + client = nil + } else { + reporter = reporters.NoopReporter{} + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "swap": + outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor() + case "none": + outputInterceptor = internal.NoopOutputInterceptor{} + default: + outputInterceptor = internal.NewOutputInterceptor() + } + client = parallel_support.NewClient(suiteConfig.ParallelHost) + if !client.Connect() { + client = nil + exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost)) + } + defer client.Close() + } + + writer := GinkgoWriter.(*internal.Writer) + if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 { + writer.SetMode(internal.WriterModeStreamAndBuffer) + } else { + writer.SetMode(internal.WriterModeBufferOnly) + } + + if reporterConfig.WillGenerateReport() { + registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig) + } + + err := global.Suite.BuildTree() + exitIfErr(err) + + suitePath, err := os.Getwd() + exitIfErr(err) + suitePath, err = filepath.Abs(suitePath) + exitIfErr(err) + + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(suiteConfig.Timeout, client), client, suiteConfig) + outputInterceptor.Shutdown() + + flagSet.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport()) + } + + if !passed { + t.Fail() + } + + if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Println("PASS | FOCUSED") + os.Exit(types.GINKGO_FOCUS_EXIT_CODE) + } + return passed +} + +/* +Skip instructs Ginkgo to skip the current spec + +You can call Skip in any Setup or Subject node closure. + +For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs +*/ +func Skip(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.Skip(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) + +Under the hood, Fail panics to end execution of the current spec. 
Ginkgo will catch this panic and proceed with +the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must +add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail. + +You can call Fail in any Setup or Subject node closure. + +You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure +*/ +func Fail(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.Fail(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite. + +You can call AbortSuite in any Setup or Subject node closure. + +You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites +*/ +func AbortSuite(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + cl := types.NewCodeLocationWithStackTrace(skip + 1) + global.Failer.AbortSuite(message, cl) + panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl)) +} + +/* +GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` +Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that +calls out to Gomega + +Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent +further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you, +however if a panic originates on a goroutine *launched* from one of your specs there's no +way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. + +You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure +*/ +func GinkgoRecover() { + e := recover() + if e != nil { + global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e) + } +} + +// pushNode is used by the various test construction DSL methods to push nodes onto the suite +// it handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits +func pushNode(node internal.Node, errors []error) bool { + exitIfErrors(errors) + exitIfErr(global.Suite.PushNode(node)) + return true +} + +/* +Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of +Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It). + +Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idomatic +to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens. + +You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes +In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func Describe(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +FDescribe focuses specs within the Describe block. 
+*/ +func FDescribe(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +PDescribe marks specs within the Describe block as pending. +*/ +func PDescribe(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) +} + +/* +XDescribe marks specs within the Describe block as pending. + +XDescribe is an alias for PDescribe +*/ +var XDescribe = PDescribe + +/* Context is an alias for Describe - it generates the exact same kind of Container node */ +var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe + +/* When is an alias for Describe - it generates the exact same kind of Container node */ +var When, FWhen, PWhen, XWhen = Describe, FDescribe, PDescribe, XDescribe + +/* +It nodes are Subject nodes that contain your spec code and assertions. + +Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure. + +You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it +In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func It(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +FIt allows you to focus an individual It. +*/ +func FIt(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +PIt allows you to mark an individual It as pending. +*/ +func PIt(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) +} + +/* +XIt allows you to mark an individual It as pending. + +XIt is an alias for PIt +*/ +var XIt = PIt + +/* +Specify is an alias for It - it can allow for more natural wording in some context. +*/ +var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt + +/* +By allows you to better document complex Specs. + +Generally you should try to keep your Its short and to the point. This is not always possible, however, +especially in the context of integration tests that capture complex or lengthy workflows. + +By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...) +and will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. + +By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports. 
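+
+A minimal sketch of both forms (the step text is illustrative):
+
+	By("fetching the current list of holds")
+	By("placing a hold", func() {
+		// the optional callback runs immediately and its duration is recorded
+	})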
+ +Note that By does not generate a new Ginkgo node - rather it is simply synctactic sugar around GinkgoWriter and AddReportEntry +You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by +*/ +func By(text string, callback ...func()) { + value := struct { + Text string + Duration time.Duration + }{ + Text: text, + } + t := time.Now() + AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t) + formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor) + GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT))) + if len(callback) == 1 { + callback[0]() + value.Duration = time.Since(t) + } + if len(callback) > 1 { + panic("just one callback per By, please") + } +} + +/* +BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run. +When running in parallel, each parallel process will call BeforeSuite. + +You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. + +You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite +*/ +func BeforeSuite(body func()) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", body)) +} + +/* +AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed. +AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs. + +When running in parallel, each parallel process will call AfterSuite. + +You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. + +You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite +*/ +func AfterSuite(body func()) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", body)) +} + +/* +SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information +from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing +information from that setup to all parallel processes. + +SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them. +The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. The functions have the following signatures: + +The first function (which only runs on process #1) has the signature: + + func() []byte + +The byte array returned by the first function is then passed to the second function, which has the signature: + + func(data []byte) + +You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure. 
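+
+A minimal sketch (the shared address is illustrative):
+
+	var dbAddress string
+	var _ = SynchronizedBeforeSuite(func() []byte {
+		// runs only on process #1; its return value is shared with every process
+		return []byte("10.0.0.1:5432")
+	}, func(data []byte) {
+		// runs on all processes once the first function has succeeded
+		dbAddress = string(data)
+	})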
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite +*/ +func SynchronizedBeforeSuite(process1Body func() []byte, allProcessBody func([]byte)) bool { + return pushNode(internal.NewSynchronizedBeforeSuiteNode(process1Body, allProcessBody, types.NewCodeLocation(1))) +} + +/* +SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting clean up into a piece that runs on all processes +and a piece that must only run once - on process #1. + +SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1 +and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until +all other processes are finished. + +Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results. + +You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure. +You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite +*/ +func SynchronizedAfterSuite(allProcessBody func(), process1Body func()) bool { + return pushNode(internal.NewSynchronizedAfterSuiteNode(allProcessBody, process1Body, types.NewCodeLocation(1))) +} + +/* +BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes +are defined in nested Container nodes the outermost BeforeEach node closures are run first. + +You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach +*/ +func BeforeEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...)) +} + +/* +JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure. +This can allow you to separate configuration from creation of resources for a spec. + +You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. +You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach +*/ +func JustBeforeEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)) +} + +/* +AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes +are defined in nested Container nodes the innermost AfterEach node closures are run first. + +Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results. + +You cannot nest any other Ginkgo nodes within an AfterEach node's closure. +You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup +*/ +func AfterEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...)) +} + +/* +JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec. 
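A sketch of the synchronized suite hooks under parallel runs; the helper functions are hypothetical stand-ins for a shared fixture such as a database:

```go
var _ = SynchronizedBeforeSuite(func() []byte {
	// runs only on parallel process #1: start the shared fixture once
	addr := startSharedDatabase() // hypothetical helper returning a string address
	return []byte(addr)
}, func(data []byte) {
	// runs on every process, after the first function has succeeded
	connectToDatabase(string(data)) // hypothetical helper
})

var _ = SynchronizedAfterSuite(func() {
	// runs on every process
	closeDatabaseConnection() // hypothetical helper
}, func() {
	// runs only on process #1, after all other processes have finished
	stopSharedDatabase() // hypothetical helper
})
```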
+ +You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. +You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach +*/ +func JustAfterEach(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...)) +} + +/* +BeforeAll nodes are Setup nodes that can occur inside Ordered contaienrs. They run just once before any specs in the Ordered container run. + +Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. + +You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. +You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers +And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall +*/ +func BeforeAll(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...)) +} + +/* +AfterAll nodes are Setup nodes that can occur inside Ordered contaienrs. They run just once after all specs in the Ordered container have run. + +Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. + +Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior. + +You cannot nest any other Ginkgo nodes within an AfterAll node's closure. +You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers +And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall +*/ +func AfterAll(args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...)) +} + +/* +DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to cleanup after the spec. + +DeferCleanup can be passed: +1. A function that takes no arguments and returns no values. +2. A function that returns an error (in which case it will assert that the returned error was nil, or it will fail the spec). +3. A function that takes arguments (and optionally returns an error) followed by a list of arguments to passe to the function. For example: + + BeforeEach(func() { + DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) + os.SetEnv("FOO", "BAR") + }) + +will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. + +When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node) +When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node) +When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node) + +Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. 
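A sketch of the same DeferCleanup patterns using the standard library's os.Setenv/os.Getenv and a cleanup registered with extra arguments (assumes the os package and the dot-imports from the first sketch):

```go
var _ = BeforeEach(func() {
	// register the restore first, then mutate; the cleanup runs after the
	// spec completes even if the spec fails
	DeferCleanup(os.Setenv, "FEATURE_FLAG", os.Getenv("FEATURE_FLAG"))
	Expect(os.Setenv("FEATURE_FLAG", "on")).To(Succeed())
})

var _ = It("creates a scratch workspace", func() {
	dir, err := os.MkdirTemp("", "workspace")
	Expect(err).NotTo(HaveOccurred())
	DeferCleanup(os.RemoveAll, dir) // removed when the spec finishes
})
```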
As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node. +You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup +*/ +func DeferCleanup(args ...interface{}) { + fail := func(message string, cl types.CodeLocation) { + global.Failer.Fail(message, cl) + } + pushNode(internal.NewCleanupNode(fail, args...)) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go new file mode 100644 index 00000000..f23e526f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -0,0 +1,82 @@ +package ginkgo + +import ( + "github.com/onsi/ginkgo/v2/internal" +) + +/* +Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question. + +You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type Offset = internal.Offset + +/* +FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. + +You can learn more here: https://onsi.github.io/ginkgo/#repeating-spec-runs-and-managing-flaky-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type FlakeAttempts = internal.FlakeAttempts + +/* +Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe. + +You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Focus = internal.Focus + +/* +Pending is a decorator that allows you to mark a spec or container as pending. Identical to PIt and PDescribe. + +You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Pending = internal.Pending + +/* +Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs. +Tests in ordered containers cannot be marked as serial - mark the ordered container instead. + +You can learn more here: https://onsi.github.io/ginkgo/#serial-specs +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Serial = internal.Serial + +/* +Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear. +They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs. + +You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const Ordered = internal.Ordered + +/* +OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once +per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container. +The behavior for non-Ordered containers/specs is unchanged. 
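A sketch tying the Ordered, BeforeAll/AfterAll, and OncePerOrdered pieces together (hypothetical migration suite; database/sql and the helper functions are assumptions):

```go
var _ = Describe("inventory", func() {
	// With OncePerOrdered this outer BeforeEach runs once around the whole
	// Ordered container below, rather than before every individual spec in it.
	BeforeEach(func() {
		seedBaseData() // hypothetical helper
	}, OncePerOrdered)

	Describe("migrations", Ordered, func() {
		var db *sql.DB // assumes database/sql is imported

		BeforeAll(func() {
			db = openTestDatabase() // hypothetical helper; runs once before the first spec
		})

		It("applies the schema", func() {
			Expect(applyMigrations(db)).To(Succeed()) // hypothetical helper
		})

		It("is idempotent when re-applied", func() {
			Expect(applyMigrations(db)).To(Succeed())
		})

		AfterAll(func() {
			Expect(db.Close()).To(Succeed()) // runs once after the last spec in the container
		})
	})
})
```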
+ +You can learh more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +const OncePerOrdered = internal.OncePerOrdered + +/* +Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/". +Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels is the union of all labels in its node hierarchy. + +You can learn more here: https://onsi.github.io/ginkgo/#spec-labels +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func Label(labels ...string) Labels { + return Labels(labels) +} + +/* +Labels are the type for spec Label decorators. Use Label(...) to construct Labels. +You can learn more here: https://onsi.github.io/ginkgo/#spec-labels +*/ +type Labels = internal.Labels diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go new file mode 100644 index 00000000..d20e5a8c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go @@ -0,0 +1,135 @@ +package ginkgo + +import ( + "time" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +Deprecated: Done Channel for asynchronous testing + +The Done channel pattern is no longer supported in Ginkgo 2.0. +See here for better patterns for asynchronouse testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing + +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing +*/ +type Done = internal.Done + +/* +Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0. + +Use Ginkgo's reporting nodes instead and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +type Reporter = reporters.DeprecatedReporter + +/* +Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs() + +Use Ginkgo's reporting nodes instead and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) + return RunSpecs(t, description) +} + +/* +Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs() + +Use Ginkgo's reporting nodes instead and 2.0 reporting infrastructure instead. 
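A sketch combining several of these decorators on containers and specs (spec texts and helpers are hypothetical):

```go
var _ = Describe("search indexing", Label("integration", "search"), func() {
	// Serial specs never run in parallel with any other spec.
	It("rebuilds the index", Serial, Label("slow"), func() {
		Expect(rebuildIndex()).To(Succeed()) // hypothetical helper
	})

	// FlakeAttempts(3) reruns this spec up to 3 times before reporting a failure.
	It("tolerates a flaky upstream", FlakeAttempts(3), func() {
		Expect(pingUpstream()).To(Succeed()) // hypothetical helper
	})
})
```

Labelled specs can then be selected on the command line, e.g. `ginkgo --label-filter="integration && !slow"`.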
You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure +For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +*/ +func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) + return RunSpecs(t, description) +} + +/* +Deprecated: GinkgoTestDescription has been replaced with SpecReport. + +Use CurrentSpecReport() instead. +You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +type DeprecatedGinkgoTestDescription struct { + FullTestText string + ComponentTexts []string + TestText string + + FileName string + LineNumber int + + Failed bool + Duration time.Duration +} +type GinkgoTestDescription = DeprecatedGinkgoTestDescription + +/* +Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport. + +Use CurrentSpecReport() instead. +You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription { + deprecationTracker.TrackDeprecation( + types.Deprecations.CurrentGinkgoTestDescription(), + types.NewCodeLocation(1), + ) + report := global.Suite.CurrentSpecReport() + if report.State == types.SpecStateInvalid { + return GinkgoTestDescription{} + } + componentTexts := []string{} + componentTexts = append(componentTexts, report.ContainerHierarchyTexts...) + componentTexts = append(componentTexts, report.LeafNodeText) + + return DeprecatedGinkgoTestDescription{ + ComponentTexts: componentTexts, + FullTestText: report.FullText(), + TestText: report.LeafNodeText, + FileName: report.LeafNodeLocation.FileName, + LineNumber: report.LeafNodeLocation.LineNumber, + Failed: report.State.Is(types.SpecStateFailureStates), + Duration: report.RunTime, + } +} + +/* +Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess() +*/ +func GinkgoParallelNode() int { + deprecationTracker.TrackDeprecation( + types.Deprecations.ParallelNode(), + types.NewCodeLocation(1), + ) + return GinkgoParallelProcess() +} + +/* +Deprecated: Benchmarker has been removed from Ginkgo 2.0 + +Use Gomega's gmeasure package instead. +You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code +*/ +type Benchmarker interface { + Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...interface{}) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) +} + +/* +Deprecated: Measure() has been removed from Ginkgo 2.0 + +Use Gomega's gmeasure package instead. 
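A sketch of the replacement pattern: read the current spec's report via CurrentSpecReport(), or register a ReportAfterEach reporting node (assumes fmt is imported; collectDiagnostics is a hypothetical helper):

```go
var _ = ReportAfterEach(func(report SpecReport) {
	if report.Failed() {
		fmt.Fprintf(GinkgoWriter, "FAILED %s after %s\n", report.FullText(), report.RunTime)
	}
})

var _ = AfterEach(func() {
	if CurrentSpecReport().Failed() {
		collectDiagnostics() // hypothetical helper
	}
})
```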
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code +*/ +func Measure(_ ...interface{}) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1)) + return true +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go new file mode 100644 index 00000000..778bfd7c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go @@ -0,0 +1,41 @@ +// +build !windows + +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter + +import ( + "io" + "os" +) + +func newColorable(file *os.File) io.Writer { + return file +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go similarity index 90% rename from vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go rename to vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go index 10880092..dd1d143c 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go @@ -1,4 +1,33 @@ -package colorable +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter import ( "bytes" @@ -10,10 +39,24 @@ import ( "strings" "syscall" "unsafe" +) - "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty" +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") ) +func isTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + const ( foregroundBlue = 0x1 foregroundGreen = 0x2 @@ -52,45 +95,28 @@ type consoleScreenBufferInfo struct { maximumWindowSize coord } -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") - procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") - procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") -) - -type Writer struct { +type writer struct { out io.Writer handle syscall.Handle lastbuf bytes.Buffer oldattr word } -func NewColorable(file *os.File) io.Writer { +func newColorable(file *os.File) io.Writer { if file == nil { panic("nil passed instead of *os.File to NewColorable()") } - if isatty.IsTerminal(file.Fd()) { + if isTerminal(file.Fd()) { var csbi consoleScreenBufferInfo handle := syscall.Handle(file.Fd()) procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: file, handle: handle, oldattr: csbi.attributes} + return &writer{out: file, handle: handle, oldattr: csbi.attributes} } else { return file } } -func NewColorableStdout() io.Writer { - return NewColorable(os.Stdout) -} - -func NewColorableStderr() io.Writer { - return NewColorable(os.Stderr) -} - var color256 = map[int]int{ 0: 0x000000, 1: 0x800000, @@ -350,7 +376,7 @@ var color256 = map[int]int{ 255: 0xeeeeee, } -func (w *Writer) Write(data []byte) (n int, err error) { +func (w *writer) Write(data []byte) (n int, err error) { var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) diff --git a/vendor/github.com/onsi/ginkgo/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go similarity index 91% rename from vendor/github.com/onsi/ginkgo/formatter/formatter.go rename to vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 30d7cbe1..43b16211 100644 --- a/vendor/github.com/onsi/ginkgo/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -2,10 +2,15 @@ package formatter import ( "fmt" + "os" "regexp" "strings" ) +// ColorableStdOut and ColorableStdErr enable color output support on Windows +var ColorableStdOut = newColorable(os.Stdout) +var 
ColorableStdErr = newColorable(os.Stderr) + const COLS = 80 type ColorMode uint8 @@ -100,13 +105,13 @@ func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...i outLines = append(outLines, line) continue } - outWords := []string{} - length := uint(0) words := strings.Split(line, " ") - for _, word := range words { + outWords := []string{words[0]} + length := uint(f.length(words[0])) + for _, word := range words[1:] { wordLength := f.length(word) - if length+wordLength <= maxWidth { - length += wordLength + if length+wordLength+1 <= maxWidth { + length += wordLength + 1 outWords = append(outWords, word) continue } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go new file mode 100644 index 00000000..1beeb114 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -0,0 +1,45 @@ +package ginkgo + +import "github.com/onsi/ginkgo/v2/internal/testingtproxy" + +/* +GinkgoT() implements an interface analogous to *testing.T and can be used with +third-party libraries that accept *testing.T through an interface. + +GinkgoT() takes an optional offset argument that can be used to get the +correct line number associated with the failure. + +You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries +*/ +func GinkgoT(optionalOffset ...int) GinkgoTInterface { + offset := 3 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset) +} + +/* +The interface returned by GinkgoT(). This covers most of the methods in the testing package's T. +*/ +type GinkgoTInterface interface { + Cleanup(func()) + Setenv(kev, value string) + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Helper() + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Parallel() + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + TempDir() string +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/counter.go b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go new file mode 100644 index 00000000..712d85af --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go @@ -0,0 +1,9 @@ +package internal + +func MakeIncrementingIndexCounter() func() (int, error) { + idx := -1 + return func() (int, error) { + idx += 1 + return idx, nil + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go similarity index 61% rename from vendor/github.com/onsi/ginkgo/internal/failer/failer.go rename to vendor/github.com/onsi/ginkgo/v2/internal/failer.go index 678ea251..e9bd9565 100644 --- a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go @@ -1,32 +1,44 @@ -package failer +package internal import ( "fmt" "sync" - "github.com/onsi/ginkgo/types" + "github.com/onsi/ginkgo/v2/types" ) type Failer struct { lock *sync.Mutex - failure types.SpecFailure + failure types.Failure state types.SpecState } -func New() *Failer { +func NewFailer() *Failer { return &Failer{ lock: &sync.Mutex{}, state: types.SpecStatePassed, } } +func (f *Failer) GetState() types.SpecState { + f.lock.Lock() + defer f.lock.Unlock() + return f.state +} + +func (f *Failer) GetFailure() types.Failure { + 
f.lock.Lock() + defer f.lock.Unlock() + return f.failure +} + func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { f.lock.Lock() defer f.lock.Unlock() if f.state == types.SpecStatePassed { f.state = types.SpecStatePanicked - f.failure = types.SpecFailure{ + f.failure = types.Failure{ Message: "Test Panicked", Location: location, ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), @@ -34,59 +46,54 @@ func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) } } -func (f *Failer) Timeout(location types.CodeLocation) { +func (f *Failer) Fail(message string, location types.CodeLocation) { f.lock.Lock() defer f.lock.Unlock() if f.state == types.SpecStatePassed { - f.state = types.SpecStateTimedOut - f.failure = types.SpecFailure{ - Message: "Timed out", + f.state = types.SpecStateFailed + f.failure = types.Failure{ + Message: message, Location: location, } } } -func (f *Failer) Fail(message string, location types.CodeLocation) { +func (f *Failer) Skip(message string, location types.CodeLocation) { f.lock.Lock() defer f.lock.Unlock() if f.state == types.SpecStatePassed { - f.state = types.SpecStateFailed - f.failure = types.SpecFailure{ + f.state = types.SpecStateSkipped + f.failure = types.Failure{ Message: message, Location: location, } } } -func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { +func (f *Failer) AbortSuite(message string, location types.CodeLocation) { f.lock.Lock() defer f.lock.Unlock() - failure := f.failure - outcome := f.state - if outcome != types.SpecStatePassed { - failure.ComponentType = componentType - failure.ComponentIndex = componentIndex - failure.ComponentCodeLocation = componentCodeLocation + if f.state == types.SpecStatePassed { + f.state = types.SpecStateAborted + f.failure = types.Failure{ + Message: message, + Location: location, + } } - - f.state = types.SpecStatePassed - f.failure = types.SpecFailure{} - - return failure, outcome } -func (f *Failer) Skip(message string, location types.CodeLocation) { +func (f *Failer) Drain() (types.SpecState, types.Failure) { f.lock.Lock() defer f.lock.Unlock() - if f.state == types.SpecStatePassed { - f.state = types.SpecStateSkipped - f.failure = types.SpecFailure{ - Message: message, - Location: location, - } - } + failure := f.failure + outcome := f.state + + f.state = types.SpecStatePassed + f.failure = types.Failure{} + + return outcome, failure } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go new file mode 100644 index 00000000..966ea0c1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -0,0 +1,125 @@ +package internal + +import ( + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +/* + If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to + unmark the container's focus. This gives developers a more intuitive experience when debugging specs. 
+ It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - + this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: + + As a common example, consider: + + FDescribe("something to debug", function() { + It("works", function() {...}) + It("works", function() {...}) + FIt("doesn't work", function() {...}) + It("works", function() {...}) + }) + + here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. + The nested policy applied by this function enables this behavior. +*/ +func ApplyNestedFocusPolicyToTree(tree *TreeNode) { + var walkTree func(tree *TreeNode) bool + walkTree = func(tree *TreeNode) bool { + if tree.Node.MarkedPending { + return false + } + hasFocusedDescendant := false + for _, child := range tree.Children { + childHasFocus := walkTree(child) + hasFocusedDescendant = hasFocusedDescendant || childHasFocus + } + tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant + return tree.Node.MarkedFocus || hasFocusedDescendant + } + + walkTree(tree) +} + +/* + Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" + It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text + and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. + + If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters. + + This function sets the `Skip` property on specs by applying Ginkgo's focus policy: + - If there are no CLI arguments and no programmatic focus, do nothing. + - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus. + - If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. + + *Note:* specs with pending nodes are Skipped when created by NewSpec. 
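A sketch of how this policy plays out in practice; the CLI invocations in the comments use the standard Ginkgo v2 filter flags:

```go
// Programmatic focus: with no CLI filters, only focused specs run.
var _ = FDescribe("payments", func() {
	FIt("charges the card", func() {}) // focusing the child un-focuses the parent container
})

// CLI filters, when present, take precedence over programmatic focus, e.g.:
//   ginkgo --focus="charges the card"     // regular expression against spec text
//   ginkgo --focus-file=payments_test.go  // file filter against code locations
//   ginkgo --skip="slow"                  // skip specs whose text matches
```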
+*/ +func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { + focusString := strings.Join(suiteConfig.FocusStrings, "|") + skipString := strings.Join(suiteConfig.SkipStrings, "|") + + hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != "" + + type SkipCheck func(spec Spec) bool + + // by default, skip any specs marked pending + skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }} + hasProgrammaticFocus := false + + if !hasFocusCLIFlags { + // check for programmatic focus + for _, spec := range specs { + if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { + skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) + hasProgrammaticFocus = true + break + } + } + } + + if suiteConfig.LabelFilter != "" { + labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) + }) + } + + if len(suiteConfig.FocusFiles) > 0 { + focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles) + skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) }) + } + + if len(suiteConfig.SkipFiles) > 0 { + skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles) + skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) }) + } + + if focusString != "" { + // skip specs that don't match the focus string + re := regexp.MustCompile(focusString) + skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) }) + } + + if skipString != "" { + // skip specs that match the skip string + re := regexp.MustCompile(skipString) + skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) }) + } + + // skip specs if shouldSkip() is true. note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status + processedSpecs := Specs{} + for _, spec := range specs { + for _, skipCheck := range skipChecks { + if skipCheck(spec) { + spec.Skip = true + break + } + } + processedSpecs = append(processedSpecs, spec) + } + + return processedSpecs, hasProgrammaticFocus +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go new file mode 100644 index 00000000..f2c0fd89 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go @@ -0,0 +1,17 @@ +package global + +import ( + "github.com/onsi/ginkgo/v2/internal" +) + +var Suite *internal.Suite +var Failer *internal.Failer + +func init() { + InitializeGlobals() +} + +func InitializeGlobals() { + Failer = internal.NewFailer() + Suite = internal.NewSuite() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go new file mode 100644 index 00000000..c6546bba --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -0,0 +1,544 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type runOncePair struct { + //nodeId should only run once... 
+ nodeID uint + nodeType types.NodeType + //...for specs in a hierarchy that includes this context + containerID uint +} + +func (pair runOncePair) isZero() bool { + return pair.nodeID == 0 +} + +func runOncePairForNode(node Node, containerID uint) runOncePair { + return runOncePair{ + nodeID: node.ID, + nodeType: node.NodeType, + containerID: containerID, + } +} + +type runOncePairs []runOncePair + +func runOncePairsForSpec(spec Spec) runOncePairs { + pairs := runOncePairs{} + + containers := spec.Nodes.WithType(types.NodeTypeContainer) + for _, node := range spec.Nodes { + if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { + pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID)) + } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered { + passedIntoAnOrderedContainer := false + firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool { + passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered + return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer + }) + if firstOrderedContainerDeeperThanNode.IsZero() { + continue + } + pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID)) + } + } + + return pairs +} + +func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair { + for i := range pairs { + if pairs[i].nodeID == nodeID { + return pairs[i] + } + } + return runOncePair{} +} + +func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool { + for i := range pairs { + if pairs[i] == pair { + return true + } + } + return false +} + +func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs { + count := 0 + for i := range pairs { + if pairs[i].nodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(runOncePairs, count), 0 + for i := range pairs { + if pairs[i].nodeType.Is(nodeTypes) { + out[j] = pairs[i] + j++ + } + } + return out +} + +type group struct { + suite *Suite + specs Specs + runOncePairs map[uint]runOncePairs + runOnceTracker map[runOncePair]types.SpecState + + succeeded bool +} + +func newGroup(suite *Suite) *group { + return &group{ + suite: suite, + runOncePairs: map[uint]runOncePairs{}, + runOnceTracker: map[runOncePair]types.SpecState{}, + succeeded: true, + } +} + +func (g *group) initialReportForSpec(spec Spec) types.SpecReport { + return types.SpecReport{ + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + ParallelProcess: g.suite.config.ParallelProcess, + IsSerial: spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + } +} + +func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { + if spec.Nodes.HasNodeMarkedPending() { + return types.SpecStatePending, types.Failure{} + } + if spec.Skip { + return types.SpecStateSkipped, types.Failure{} + } + if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll { + 
return types.SpecStateSkipped, types.Failure{} + } + if !g.succeeded { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because an earlier spec in an ordered container failed") + } + beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) + for _, pair := range beforeOncePairs { + if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType)) + } + } + if g.suite.config.DryRun { + return types.SpecStatePassed, types.Failure{} + } + return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure +} + +func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { + lastSpecID := uint(0) + for idx := range g.specs { + if g.specs[idx].Skip { + continue + } + sID := g.specs[idx].SubjectID() + if g.runOncePairs[sID].hasRunOncePair(pair) { + lastSpecID = sID + } + } + return lastSpecID == specID +} + +func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { + interruptStatus := g.suite.interruptHandler.Status() + + pairs := g.runOncePairs[spec.SubjectID()] + + nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) + nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt)) + terminatingNode, terminatingPair := Node{}, runOncePair{} + + for _, node := range nodes { + oncePair := pairs.runOncePairFor(node.ID) + if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) { + continue + } + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) + if !oncePair.isZero() { + g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State + } + if g.suite.currentSpecReport.State != types.SpecStatePassed { + terminatingNode, terminatingPair = node, oncePair + break + } + } + + afterNodeWasRun := map[uint]bool{} + includeDeferCleanups := false + for { + nodes := spec.Nodes.WithType(types.NodeTypeAfterEach) + nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel() + nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...) + if !terminatingNode.IsZero() { + nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel) + } + if includeDeferCleanups { + nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...) + nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...) 
+ } + nodes = nodes.Filter(func(node Node) bool { + if afterNodeWasRun[node.ID] { + //this node has already been run on this attempt, don't rerun it + return false + } + pair := runOncePair{} + switch node.NodeType { + case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll: + // check if we were generated in an AfterNode that has already run + if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] { + return true // we were, so we should definitely run this cleanup now + } + // looks like this cleanup nodes was generated by a before node or it. + // the run-once status of a cleanup node is governed by the run-once status of its generator + pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated) + default: + pair = pairs.runOncePairFor(node.ID) + } + if pair.isZero() { + // this node is not governed by any run-once policy, we should run it + return true + } + // it's our last chance to run if we're the last spec for our oncePair + isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair) + + switch g.suite.currentSpecReport.State { + case types.SpecStatePassed: //this attempt is passing... + return isLastSpecWithPair //...we should run-once if we'this is our last chance + case types.SpecStateSkipped: //the spec was skipped by the user... + if isLastSpecWithPair { + return true //...we're the last spec, so we should run the AfterNode + } + if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel { + return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run + } + case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... + if isFinalAttempt { + return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + } + if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again + if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) { + return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it + } else { + return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level + } + } + case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted + return true //...that means the test run is over and we should clean up the stack. 
Run the AfterNode + } + return false + }) + + if len(nodes) == 0 && includeDeferCleanups { + break + } + + for _, node := range nodes { + afterNodeWasRun[node.ID] = true + state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) + if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { + g.suite.currentSpecReport.State = state + g.suite.currentSpecReport.Failure = failure + } + } + includeDeferCleanups = true + } + +} + +func (g *group) run(specs Specs) { + g.specs = specs + for _, spec := range g.specs { + g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) + } + + for _, spec := range g.specs { + g.suite.currentSpecReport = g.initialReportForSpec(spec) + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec) + g.suite.reporter.WillRun(g.suite.currentSpecReport) + g.suite.reportEach(spec, types.NodeTypeReportBeforeEach) + + skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) + + g.suite.currentSpecReport.StartTime = time.Now() + if !skip { + maxAttempts := max(1, spec.FlakeAttempts()) + if g.suite.config.FlakeAttempts > 0 { + maxAttempts = g.suite.config.FlakeAttempts + } + for attempt := 0; attempt < maxAttempts; attempt++ { + g.suite.currentSpecReport.NumAttempts = attempt + 1 + g.suite.writer.Truncate() + g.suite.outputInterceptor.StartInterceptingOutput() + if attempt > 0 { + fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) + } + + g.attemptSpec(attempt == maxAttempts-1, spec) + + g.suite.currentSpecReport.EndTime = time.Now() + g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) + g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes()) + g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput() + + if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } + } + } + + g.suite.reportEach(spec, types.NodeTypeReportAfterEach) + g.suite.processCurrentSpecReport() + if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + g.succeeded = false + } + g.suite.currentSpecReport = types.SpecReport{} + } +} + +func (g *group) oldRun(specs Specs) { + var suite = g.suite + nodeState := map[uint]types.SpecState{} + groupSucceeded := true + + indexOfLastSpecContainingNodeID := func(id uint) int { + lastIdx := -1 + for idx := range specs { + if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip { + lastIdx = idx + } + } + return lastIdx + } + + for i, spec := range specs { + suite.currentSpecReport = types.SpecReport{ + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + ParallelProcess: suite.config.ParallelProcess, + IsSerial: 
spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + } + + skip := spec.Skip + if spec.Nodes.HasNodeMarkedPending() { + skip = true + suite.currentSpecReport.State = types.SpecStatePending + } else { + if suite.interruptHandler.Status().Interrupted || suite.skipAll { + skip = true + } + if !groupSucceeded { + skip = true + suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because an earlier spec in an ordered container failed") + } + for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) { + if nodeState[node.ID] == types.SpecStateSkipped { + skip = true + suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because Skip() was called in BeforeAll") + break + } + } + if skip { + suite.currentSpecReport.State = types.SpecStateSkipped + } + } + + if suite.config.DryRun && !skip { + skip = true + suite.currentSpecReport.State = types.SpecStatePassed + } + + suite.reporter.WillRun(suite.currentSpecReport) + //send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks + suite.reportEach(spec, types.NodeTypeReportBeforeEach) + if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + //the reportEach failed, skip this spec + skip = true + } + + suite.currentSpecReport.StartTime = time.Now() + maxAttempts := max(1, spec.FlakeAttempts()) + if suite.config.FlakeAttempts > 0 { + maxAttempts = suite.config.FlakeAttempts + } + + for attempt := 0; !skip && (attempt < maxAttempts); attempt++ { + suite.currentSpecReport.NumAttempts = attempt + 1 + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + if attempt > 0 { + fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) + } + isFinalAttempt := (attempt == maxAttempts-1) + + interruptStatus := suite.interruptHandler.Status() + deepestNestingLevelAttained := -1 + var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool { + return nodeState[n.ID] != types.SpecStatePassed + }) + nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() + nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) + nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...) + + var terminatingNode Node + for j := range nodes { + deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel) + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j])) + suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) + nodeState[nodes[j].ID] = suite.currentSpecReport.State + if suite.currentSpecReport.State != types.SpecStatePassed { + terminatingNode = nodes[j] + break + } + } + + afterAllNodesThatRan := map[uint]bool{} + // pull out some shared code so we aren't repeating ourselves down below. 
this just runs after and cleanup nodes + runAfterAndCleanupNodes := func(nodes Nodes) { + for j := range nodes { + state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j])) + suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) + nodeState[nodes[j].ID] = state + if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { + suite.currentSpecReport.State = state + suite.currentSpecReport.Failure = failure + if state != types.SpecStatePassed { + terminatingNode = nodes[j] + } + } + if nodes[j].NodeType.Is(types.NodeTypeAfterAll) { + afterAllNodesThatRan[nodes[j].ID] = true + } + } + } + + // pull out a helper that captures the logic of whether or not we should run a given After node. + // there is complexity here stemming from the fact that we allow nested ordered contexts and flakey retries + shouldRunAfterNode := func(n Node) bool { + if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { + return true + } + var id uint + if n.NodeType.Is(types.NodeTypeAfterAll) { + id = n.ID + if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again. + return false + } + } + if n.NodeType.Is(types.NodeTypeCleanupAfterAll) { + id = n.NodeIDWhereCleanupWasGenerated + } + isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i + + switch suite.currentSpecReport.State { + case types.SpecStatePassed: //we've passed so far... + return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it + case types.SpecStateSkipped: //the spec was skipped by the user... + if isLastSpecWithNode { + return true //...we're the last spec, so we should run the AfterNode + } + if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel { + return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip + } + case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... + if isFinalAttempt { + return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + } + if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) { + //...we'll be rerunning a BeforeAll so we should cleanup after it if... + if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel { + return true //we're at the same nesting level + } + if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { + return true //we're a DeferCleanup generated by it + } + } + if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) { + //...we'll be rerunning an AfterAll so we should cleanup after it if... + if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { + return true //we're a DeferCleanup generated by it + } + } + case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted + return true //...that means the test run is over and we should clean up the stack. Run the AfterNode + } + return false + } + + // first pass - run all the JustAfterEach, Aftereach, and AfterAlls. Our shoudlRunAfterNode filter function will clean up the AfterAlls for us. 
+ afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel() + afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...) + afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained) + afterNodes = afterNodes.Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + // second-pass perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls: + afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + // now we run any DeferCleanups + afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse() + afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...) + runAfterAndCleanupNodes(afterNodes) + + // third-pass, perhaps a DeferCleanup failed and now we need to run the AfterAlls. + afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + // and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up + afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode) + runAfterAndCleanupNodes(afterNodes) + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + + if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } + } + + //send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks + suite.reportEach(spec, types.NodeTypeReportAfterEach) + suite.processCurrentSpecReport() + if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + groupSucceeded = false + } + suite.currentSpecReport = types.SpecReport{} + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go new file mode 100644 index 00000000..aca7d1c4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -0,0 +1,212 @@ +package interrupt_handler + +import ( + "fmt" + "os" + "os/signal" + "runtime" + "sync" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/internal/parallel_support" +) + +const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second +const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10 +const ABORT_POLLING_INTERVAL = 500 * time.Millisecond +const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second + +type InterruptCause uint + +const ( + InterruptCauseInvalid InterruptCause = iota + + InterruptCauseSignal + InterruptCauseTimeout + InterruptCauseAbortByOtherProcess +) + +func (ic InterruptCause) String() string { + switch ic { + case InterruptCauseSignal: + return "Interrupted by User" + case 
InterruptCauseTimeout: + return "Interrupted by Timeout" + case InterruptCauseAbortByOtherProcess: + return "Interrupted by Other Ginkgo Process" + } + return "INVALID_INTERRUPT_CAUSE" +} + +type InterruptStatus struct { + Interrupted bool + Channel chan interface{} + Cause InterruptCause +} + +type InterruptHandlerInterface interface { + Status() InterruptStatus + SetInterruptPlaceholderMessage(string) + ClearInterruptPlaceholderMessage() + InterruptMessageWithStackTraces() string +} + +type InterruptHandler struct { + c chan interface{} + lock *sync.Mutex + interrupted bool + interruptPlaceholderMessage string + interruptCause InterruptCause + client parallel_support.Client + stop chan interface{} +} + +func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler { + handler := &InterruptHandler{ + c: make(chan interface{}), + lock: &sync.Mutex{}, + interrupted: false, + stop: make(chan interface{}), + client: client, + } + handler.registerForInterrupts(timeout) + return handler +} + +func (handler *InterruptHandler) Stop() { + close(handler.stop) +} + +func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { + // os signal handling + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM) + + // timeout handling + var timeoutChannel <-chan time.Time + var timeoutTimer *time.Timer + if timeout > 0 { + timeoutTimer = time.NewTimer(timeout) + timeoutChannel = timeoutTimer.C + } + + // cross-process abort handling + var abortChannel chan bool + if handler.client != nil { + abortChannel = make(chan bool) + go func() { + pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) + for { + select { + case <-pollTicker.C: + if handler.client.ShouldAbort() { + abortChannel <- true + pollTicker.Stop() + return + } + case <-handler.stop: + pollTicker.Stop() + return + } + } + }() + } + + // listen for any interrupt signals + // note that some (timeouts, cross-process aborts) will only trigger once + // for these we set up a ticker to keep interrupting the suite until it ends + // this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up + // get interrupted eventually + go func() { + var interruptCause InterruptCause + var repeatChannel <-chan time.Time + var repeatTicker *time.Ticker + for { + select { + case <-signalChannel: + interruptCause = InterruptCauseSignal + case <-timeoutChannel: + interruptCause = InterruptCauseTimeout + repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT) + if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION { + repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION + } + timeoutTimer.Stop() + repeatTicker = time.NewTicker(repeatInterruptTimeout) + repeatChannel = repeatTicker.C + case <-abortChannel: + interruptCause = InterruptCauseAbortByOtherProcess + repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION) + repeatChannel = repeatTicker.C + case <-repeatChannel: + //do nothing, just interrupt again using the same interruptCause + case <-handler.stop: + if timeoutTimer != nil { + timeoutTimer.Stop() + } + if repeatTicker != nil { + repeatTicker.Stop() + } + signal.Stop(signalChannel) + return + } + handler.lock.Lock() + handler.interruptCause = interruptCause + if handler.interruptPlaceholderMessage != "" { + fmt.Println(handler.interruptPlaceholderMessage) + } + handler.interrupted = true + close(handler.c) + handler.c = make(chan interface{}) + handler.lock.Unlock() + } 
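// Sketch of how a caller consumes the handler built above (an assumed shape that
// mirrors, rather than quotes, what the suite's node runner does): race the node body
// against Status().Channel. Because the channel is closed and replaced on every repeat
// interrupt, even a cleanup that ignores the first signal keeps being interrupted.
func runBodyOrInterrupt(handler InterruptHandlerInterface, body func()) (interrupted bool) {
    done := make(chan struct{})
    go func() {
        defer close(done)
        body()
    }()
    select {
    case <-done:
        return false // body finished normally
    case <-handler.Status().Channel:
        return true // interrupted; the caller can report Status().Cause
    }
}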
+ }() +} + +func (handler *InterruptHandler) Status() InterruptStatus { + handler.lock.Lock() + defer handler.lock.Unlock() + + return InterruptStatus{ + Interrupted: handler.interrupted, + Channel: handler.c, + Cause: handler.interruptCause, + } +} + +func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.interruptPlaceholderMessage = message +} + +func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.interruptPlaceholderMessage = "" +} + +func (handler *InterruptHandler) InterruptMessageWithStackTraces() string { + handler.lock.Lock() + out := fmt.Sprintf("%s\n\n", handler.interruptCause.String()) + defer handler.lock.Unlock() + if handler.interruptCause == InterruptCauseAbortByOtherProcess { + return out + } + out += "Here's a stack trace of all running goroutines:\n" + buf := make([]byte, 8192) + for { + n := runtime.Stack(buf, true) + if n < len(buf) { + buf = buf[:n] + break + } + buf = make([]byte, 2*len(buf)) + } + out += formatter.Fi(1, "%s", string(buf)) + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go new file mode 100644 index 00000000..bf0de496 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go @@ -0,0 +1,15 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package interrupt_handler + +import ( + "os" + "os/signal" + "syscall" +) + +func SwallowSigQuit() { + c := make(chan os.Signal, 1024) + signal.Notify(c, syscall.SIGQUIT) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go new file mode 100644 index 00000000..fcf8da83 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package interrupt_handler + +func SwallowSigQuit() { + //noop +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go new file mode 100644 index 00000000..289e4dde --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -0,0 +1,660 @@ +package internal + +import ( + "fmt" + "reflect" + "sort" + + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +var _global_node_id_counter = uint(0) +var _global_id_mutex = &sync.Mutex{} + +func UniqueNodeID() uint { + //There's a reace in the internal integration tests if we don't make + //accessing _global_node_id_counter safe across goroutines. 
+ _global_id_mutex.Lock() + defer _global_id_mutex.Unlock() + _global_node_id_counter += 1 + return _global_node_id_counter +} + +type Node struct { + ID uint + NodeType types.NodeType + + Text string + Body func() + CodeLocation types.CodeLocation + NestingLevel int + + SynchronizedBeforeSuiteProc1Body func() []byte + SynchronizedBeforeSuiteAllProcsBody func([]byte) + + SynchronizedAfterSuiteAllProcsBody func() + SynchronizedAfterSuiteProc1Body func() + + ReportEachBody func(types.SpecReport) + ReportAfterSuiteBody func(types.Report) + + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedOncePerOrdered bool + FlakeAttempts int + Labels Labels + + NodeIDWhereCleanupWasGenerated uint +} + +// Decoration Types +type focusType bool +type pendingType bool +type serialType bool +type orderedType bool +type honorsOrderedType bool + +const Focus = focusType(true) +const Pending = pendingType(true) +const Serial = serialType(true) +const Ordered = orderedType(true) +const OncePerOrdered = honorsOrderedType(true) + +type FlakeAttempts uint +type Offset uint +type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing +type Labels []string + +func UnionOfLabels(labels ...Labels) Labels { + out := Labels{} + seen := map[string]bool{} + for _, labelSet := range labels { + for _, label := range labelSet { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + return out +} + +func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { + decorations := []interface{}{} + remainingArgs := []interface{}{} + for _, arg := range args { + if isDecoration(arg) { + decorations = append(decorations, arg) + } else { + remainingArgs = append(remainingArgs, arg) + } + } + return decorations, remainingArgs +} + +func isDecoration(arg interface{}) bool { + switch t := reflect.TypeOf(arg); { + case t == nil: + return false + case t == reflect.TypeOf(Offset(0)): + return true + case t == reflect.TypeOf(types.CodeLocation{}): + return true + case t == reflect.TypeOf(Focus): + return true + case t == reflect.TypeOf(Pending): + return true + case t == reflect.TypeOf(Serial): + return true + case t == reflect.TypeOf(Ordered): + return true + case t == reflect.TypeOf(OncePerOrdered): + return true + case t == reflect.TypeOf(FlakeAttempts(0)): + return true + case t == reflect.TypeOf(Labels{}): + return true + case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): + return true + default: + return false + } +} + +func isSliceOfDecorations(slice interface{}) bool { + vSlice := reflect.ValueOf(slice) + if vSlice.Len() == 0 { + return false + } + for i := 0; i < vSlice.Len(); i++ { + if !isDecoration(vSlice.Index(i).Interface()) { + return false + } + } + return true +} + +func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { + baseOffset := 2 + node := Node{ + ID: UniqueNodeID(), + NodeType: nodeType, + Text: text, + Labels: Labels{}, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + } + errors := []error{} + appendError := func(err error) { + if err != nil { + errors = append(errors, err) + } + } + + args = unrollInterfaceSlice(args) + + remainingArgs := []interface{}{} + //First get the CodeLocation up-to-date + for _, arg := range args { + switch v := arg.(type) { + case Offset: + node.CodeLocation = types.NewCodeLocation(baseOffset + int(v)) + case types.CodeLocation: + node.CodeLocation = v + default: + 
remainingArgs = append(remainingArgs, arg) + } + } + + labelsSeen := map[string]bool{} + trackedFunctionError := false + args = remainingArgs + remainingArgs = []interface{}{} + //now process the rest of the args + for _, arg := range args { + + switch t := reflect.TypeOf(arg); { + case t == reflect.TypeOf(float64(0)): + break //ignore deprecated timeouts + case t == reflect.TypeOf(Focus): + node.MarkedFocus = bool(arg.(focusType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus")) + } + case t == reflect.TypeOf(Pending): + node.MarkedPending = bool(arg.(pendingType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending")) + } + case t == reflect.TypeOf(Serial): + node.MarkedSerial = bool(arg.(serialType)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial")) + } + case t == reflect.TypeOf(Ordered): + node.MarkedOrdered = bool(arg.(orderedType)) + if !nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) + } + case t == reflect.TypeOf(OncePerOrdered): + node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) + if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) + } + case t == reflect.TypeOf(FlakeAttempts(0)): + node.FlakeAttempts = int(arg.(FlakeAttempts)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts")) + } + case t == reflect.TypeOf(Labels{}): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) + } + for _, label := range arg.(Labels) { + if !labelsSeen[label] { + labelsSeen[label] = true + label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation) + node.Labels = append(node.Labels, label) + appendError(err) + } + } + case t.Kind() == reflect.Func: + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done))) + if !isValid { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if t.NumIn() == 0 { + node.Body = arg.(func()) + } else { + deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation) + deprecatedAsyncBody := arg.(func(Done)) + node.Body = func() { deprecatedAsyncBody(make(Done)) } + } + default: + remainingArgs = append(remainingArgs, arg) + } + } + + //validations + if node.MarkedPending && node.MarkedFocus { + appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) + } + + if node.Body == nil && !node.MarkedPending && !trackedFunctionError { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + for _, arg := range remainingArgs { + appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg)) 
+ } + + if len(errors) > 0 { + return Node{}, errors + } + + return node, errors +} + +func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeSynchronizedBeforeSuite, + SynchronizedBeforeSuiteProc1Body: proc1Body, + SynchronizedBeforeSuiteAllProcsBody: allProcsBody, + CodeLocation: codeLocation, + }, nil +} + +func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeSynchronizedAfterSuite, + SynchronizedAfterSuiteAllProcsBody: allProcsBody, + SynchronizedAfterSuiteProc1Body: proc1Body, + CodeLocation: codeLocation, + }, nil +} + +func NewReportBeforeEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeReportBeforeEach, + ReportEachBody: body, + CodeLocation: codeLocation, + NestingLevel: -1, + }, nil +} + +func NewReportAfterEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeReportAfterEach, + ReportEachBody: body, + CodeLocation: codeLocation, + NestingLevel: -1, + }, nil +} + +func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) { + return Node{ + ID: UniqueNodeID(), + Text: text, + NodeType: types.NodeTypeReportAfterSuite, + ReportAfterSuiteBody: body, + CodeLocation: codeLocation, + }, nil +} + +func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { + baseOffset := 2 + node := Node{ + ID: UniqueNodeID(), + NodeType: types.NodeTypeCleanupInvalid, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + } + remainingArgs := []interface{}{} + for _, arg := range args { + switch t := reflect.TypeOf(arg); { + case t == reflect.TypeOf(Offset(0)): + node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset))) + case t == reflect.TypeOf(types.CodeLocation{}): + node.CodeLocation = arg.(types.CodeLocation) + default: + remainingArgs = append(remainingArgs, arg) + } + } + + if len(remainingArgs) == 0 { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + } + callback := reflect.ValueOf(remainingArgs[0]) + if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + } + callArgs := []reflect.Value{} + for _, arg := range remainingArgs[1:] { + callArgs = append(callArgs, reflect.ValueOf(arg)) + } + cl := node.CodeLocation + node.Body = func() { + out := callback.Call(callArgs) + if len(out) == 1 && !out[0].IsNil() { + fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl) + } + } + + return node, nil +} + +func (n Node) IsZero() bool { + return n.ID == 0 +} + +/* Nodes */ +type Nodes []Node + +func (n Nodes) CopyAppend(nodes ...Node) Nodes { + numN := len(n) + out := make(Nodes, numN+len(nodes)) + for i, node := range n { + out[i] = node + } + for j, node := range nodes { + out[numN+j] = node + } + return out +} + +func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) { + pivotIdx := len(n) + for i := range n { + if n[i].ID == pivot.ID { + pivotIdx = i + break + } + } + left := n[:pivotIdx] + right := Nodes{} + if pivotIdx+1 < len(n) 
{ + right = n[pivotIdx+1:] + } + + return left, right +} + +func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node { + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + return n[i] + } + } + return Node{} +} + +func (n Nodes) WithType(nodeTypes types.NodeType) Nodes { + count := 0 + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(Nodes, count), 0 + for i := range n { + if n[i].NodeType.Is(nodeTypes) { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes { + count := 0 + for i := range n { + if !n[i].NodeType.Is(nodeTypes) { + count++ + } + } + + out, j := make(Nodes, count), 0 + for i := range n { + if !n[i].NodeType.Is(nodeTypes) { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) WithoutNode(nodeToExclude Node) Nodes { + idxToExclude := len(n) + for i := range n { + if n[i].ID == nodeToExclude.ID { + idxToExclude = i + break + } + } + if idxToExclude == len(n) { + return n + } + out, j := make(Nodes, len(n)-1), 0 + for i := range n { + if i == idxToExclude { + continue + } + out[j] = n[i] + j++ + } + return out +} + +func (n Nodes) Filter(filter func(Node) bool) Nodes { + trufa, count := make([]bool, len(n)), 0 + for i := range n { + if filter(n[i]) { + trufa[i] = true + count += 1 + } + } + out, j := make(Nodes, count), 0 + for i := range n { + if trufa[i] { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) FirstSatisfying(filter func(Node) bool) Node { + for i := range n { + if filter(n[i]) { + return n[i] + } + } + return Node{} +} + +func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes { + count := 0 + for i := range n { + if n[i].NestingLevel <= deepestNestingLevel { + count++ + } + } + out, j := make(Nodes, count), 0 + for i := range n { + if n[i].NestingLevel <= deepestNestingLevel { + out[j] = n[i] + j++ + } + } + return out +} + +func (n Nodes) SortedByDescendingNestingLevel() Nodes { + out := make(Nodes, len(n)) + copy(out, n) + sort.SliceStable(out, func(i int, j int) bool { + return out[i].NestingLevel > out[j].NestingLevel + }) + + return out +} + +func (n Nodes) SortedByAscendingNestingLevel() Nodes { + out := make(Nodes, len(n)) + copy(out, n) + sort.SliceStable(out, func(i int, j int) bool { + return out[i].NestingLevel < out[j].NestingLevel + }) + + return out +} + +func (n Nodes) FirstWithNestingLevel(level int) Node { + for i := range n { + if n[i].NestingLevel == level { + return n[i] + } + } + return Node{} +} + +func (n Nodes) Reverse() Nodes { + out := make(Nodes, len(n)) + for i := range n { + out[len(n)-1-i] = n[i] + } + return out +} + +func (n Nodes) Texts() []string { + out := make([]string, len(n)) + for i := range n { + out[i] = n[i].Text + } + return out +} + +func (n Nodes) Labels() [][]string { + out := make([][]string, len(n)) + for i := range n { + if n[i].Labels == nil { + out[i] = []string{} + } else { + out[i] = []string(n[i].Labels) + } + } + return out +} + +func (n Nodes) UnionOfLabels() []string { + out := []string{} + seen := map[string]bool{} + for i := range n { + for _, label := range n[i].Labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + return out +} + +func (n Nodes) CodeLocations() []types.CodeLocation { + out := make([]types.CodeLocation, len(n)) + for i := range n { + out[i] = n[i].CodeLocation + } + return out +} + +func (n Nodes) BestTextFor(node Node) string { + if node.Text != "" { + return node.Text + } + parentNestingLevel := 
node.NestingLevel - 1 + for i := range n { + if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel { + return n[i].Text + } + } + + return "" +} + +func (n Nodes) ContainsNodeID(id uint) bool { + for i := range n { + if n[i].ID == id { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedPending() bool { + for i := range n { + if n[i].MarkedPending { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedFocus() bool { + for i := range n { + if n[i].MarkedFocus { + return true + } + } + return false +} + +func (n Nodes) HasNodeMarkedSerial() bool { + for i := range n { + if n[i].MarkedSerial { + return true + } + } + return false +} + +func (n Nodes) FirstNodeMarkedOrdered() Node { + for i := range n { + if n[i].MarkedOrdered { + return n[i] + } + } + return Node{} +} + +func unrollInterfaceSlice(args interface{}) []interface{} { + v := reflect.ValueOf(args) + if v.Kind() != reflect.Slice { + return []interface{}{args} + } + out := []interface{}{} + for i := 0; i < v.Len(); i++ { + el := reflect.ValueOf(v.Index(i).Interface()) + if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { + out = append(out, unrollInterfaceSlice(el.Interface())...) + } else { + out = append(out, v.Index(i).Interface()) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go new file mode 100644 index 00000000..f4723a54 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -0,0 +1,121 @@ +package internal + +import ( + "math/rand" + "sort" + + "github.com/onsi/ginkgo/v2/types" +) + +type GroupedSpecIndices []SpecIndices +type SpecIndices []int + +func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) { + /* + Ginkgo has sophisticated suport for randomizing specs. Specs are guaranteed to have the same + order for a given seed across test runs. + + By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging + experience - specs within a given container run in the order they appear in the file. + + Developers can set -randomizeAllSpecs to shuffle _all_ specs. + + In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled. + + Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished. + */ + + // Seed a new random source based on thee configured random seed. + r := rand.New(rand.NewSource(suiteConfig.RandomSeed)) + + // first break things into execution groups + // a group represents a single unit of execution and is a collection of SpecIndices + // usually a group is just a single spec, however ordered containers must be preserved as a single group + executionGroupIDs := []uint{} + executionGroups := map[uint]SpecIndices{} + for idx, spec := range specs { + groupNode := spec.Nodes.FirstNodeMarkedOrdered() + if groupNode.IsZero() { + groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt) + } + executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx) + if len(executionGroups[groupNode.ID]) == 1 { + executionGroupIDs = append(executionGroupIDs, groupNode.ID) + } + } + + // now, we only shuffle all the execution groups if we're randomizing all specs, otherwise + // we shuffle outermost containers. 
so we need to form shufflable groupings of GroupIDs + shufflableGroupingIDs := []uint{} + shufflableGroupingIDToGroupIDs := map[uint][]uint{} + shufflableGroupingsIDToSortKeys := map[uint]string{} + + // for each execution group we're going to have to pick a node to represent how the + // execution group is grouped for shuffling: + nodeTypesToShuffle := types.NodeTypesForContainerAndIt + if suiteConfig.RandomizeAllSpecs { + nodeTypesToShuffle = types.NodeTypeIt + } + + //so, fo reach execution group: + for _, groupID := range executionGroupIDs { + // pick out a representative spec + representativeSpec := specs[executionGroups[groupID][0]] + + // and grab the node on the spec that will represent which shufflable group this execution group belongs tu + shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle) + + //add the execution group to its shufflable group + shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID) + + //and if it's the first one in + if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 { + // record the shuffleable group ID + shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID) + // and record the sort key to use + shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String() + } + } + + // now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id + sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool { + keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]] + keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]] + if keyA == keyB { + return shufflableGroupingIDs[i] < shufflableGroupingIDs[j] + } else { + return keyA < keyB + } + }) + + // now we permute the sorted shufflable grouping IDs and build the ordered Groups + orderedGroups := GroupedSpecIndices{} + permutation := r.Perm(len(shufflableGroupingIDs)) + for _, j := range permutation { + //let's get the execution group IDs for this shufflable group: + executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]] + // and we'll add their associated specindices to the orderedGroups slice: + for _, executionGroupID := range executionGroupIDsForJ { + orderedGroups = append(orderedGroups, executionGroups[executionGroupID]) + } + } + + // If we're running in series, we're done. + if suiteConfig.ParallelTotal == 1 { + return orderedGroups, GroupedSpecIndices{} + } + + // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set. + // The parallelizable groups will run across all Ginkgo processes... + // ...the serial groups will only run on Process #1 after all other processes have exited. 
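// Minimal standalone sketch (assumed file layout, not part of the vendored source) of
// the seeded-permutation idea used above: a fixed RandomSeed yields the same r.Perm
// result on every run, which is what makes a fixed `--seed` replay the same spec order.
package main

import (
    "fmt"
    "math/rand"
)

func main() {
    groups := []string{"container A", "container B", "ordered group C", "It D"}
    r := rand.New(rand.NewSource(17)) // same seed => same permutation every run
    for _, j := range r.Perm(len(groups)) {
        fmt.Println(groups[j])
    }
}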
+ parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{} + for _, specIndices := range orderedGroups { + if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() { + serialGroups = append(serialGroups, specIndices) + } else { + parallelizableGroups = append(parallelizableGroups, specIndices) + } + } + + return parallelizableGroups, serialGroups +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go new file mode 100644 index 00000000..6f7c386c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go @@ -0,0 +1,250 @@ +package internal + +import ( + "bytes" + "io" + "os" + "time" +) + +const BAILOUT_TIME = 1 * time.Second +const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output. + +When running in parallel, Ginkgo captures stdout and stderr output +and attaches it to the running spec. It looks like that process is getting +stuck for this suite. + +This usually happens if you, or a library you are using, spin up an external +process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This +causes the external process to keep Ginkgo's output interceptor pipe open and +causes output interception to hang. + +Ginkgo has detected this and shortcircuited the capture process. The specs +will continue running after this message however output from the external +process that caused this issue will not be captured. + +You have several options to fix this. In preferred order they are: + +1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process. +2. Ensure your process exits before the current spec completes. If your +process is long-lived and must cross spec boundaries, this option won't +work for you. +3. Pause Ginkgo's output interceptor befor starting your process and then +resume it after. Use PauseOutputInterception() and ResumeOutputInterception() +to do this. +4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will +turn off all output interception but allow specs to run in parallel without this +issue. You may miss important output if you do this including output from Go's +race detector. + +More details on issue #851 - https://github.com/onsi/ginkgo/issues/851 +` + +/* +The OutputInterceptor is used by to +intercept and capture all stdin and stderr output during a test run. +*/ +type OutputInterceptor interface { + StartInterceptingOutput() + StartInterceptingOutputAndForwardTo(io.Writer) + StopInterceptingAndReturnOutput() string + + PauseIntercepting() + ResumeIntercepting() + + Shutdown() +} + +type NoopOutputInterceptor struct{} + +func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {} +func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {} +func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" } +func (interceptor NoopOutputInterceptor) PauseIntercepting() {} +func (interceptor NoopOutputInterceptor) ResumeIntercepting() {} +func (interceptor NoopOutputInterceptor) Shutdown() {} + +type pipePair struct { + reader *os.File + writer *os.File +} + +func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) { + for { + //make the next pipe... 
+ pair := pipePair{} + pair.reader, pair.writer, _ = os.Pipe() + select { + //...and provide it to the next consumer (they are responsible for closing the files) + case pipeChannel <- pair: + continue + //...or close the files if we were told to shutdown + case <-shutdown: + pair.reader.Close() + pair.writer.Close() + return + } + } +} + +type interceptorImplementation interface { + CreateStdoutStderrClones() (*os.File, *os.File) + ConnectPipeToStdoutStderr(*os.File) + RestoreStdoutStderrFromClones(*os.File, *os.File) + ShutdownClones(*os.File, *os.File) +} + +type genericOutputInterceptor struct { + intercepting bool + + stdoutClone *os.File + stderrClone *os.File + pipe pipePair + + shutdown chan interface{} + emergencyBailout chan interface{} + pipeChannel chan pipePair + interceptedContent chan string + + forwardTo io.Writer + accumulatedOutput string + + implementation interceptorImplementation +} + +func (interceptor *genericOutputInterceptor) StartInterceptingOutput() { + interceptor.StartInterceptingOutputAndForwardTo(io.Discard) +} + +func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) { + if interceptor.intercepting { + return + } + interceptor.accumulatedOutput = "" + interceptor.forwardTo = w + interceptor.ResumeIntercepting() +} + +func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string { + if interceptor.intercepting { + interceptor.PauseIntercepting() + } + return interceptor.accumulatedOutput +} + +func (interceptor *genericOutputInterceptor) ResumeIntercepting() { + if interceptor.intercepting { + return + } + interceptor.intercepting = true + if interceptor.stdoutClone == nil { + interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones() + interceptor.shutdown = make(chan interface{}) + go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown) + } + + // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is tring to log to stdout and stderr) + // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running + interceptor.pipe = <-interceptor.pipeChannel + + interceptor.emergencyBailout = make(chan interface{}) + + //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting + go func() { + buffer := &bytes.Buffer{} + destination := io.MultiWriter(buffer, interceptor.forwardTo) + copyFinished := make(chan interface{}) + reader := interceptor.pipe.reader + go func() { + io.Copy(destination, reader) + reader.Close() // close the read end of the pipe so we don't leak a file descriptor + close(copyFinished) + }() + select { + case <-copyFinished: + interceptor.interceptedContent <- buffer.String() + case <-interceptor.emergencyBailout: + interceptor.interceptedContent <- "" + } + }() + + interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer) +} + +func (interceptor *genericOutputInterceptor) PauseIntercepting() { + if !interceptor.intercepting { + return + } + // first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing + // to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them + interceptor.pipe.writer.Close() // the pipewriter itself + + // we also need to stop intercepting. 
we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors; + // this also closes #1 and #2 before it points that their original stdout and stderr file descriptions + interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone) + + var content string + select { + case content = <-interceptor.interceptedContent: + case <-time.After(BAILOUT_TIME): + /* + By closing all the pipe writer's file descriptors associated with the pipe writer's file description the io.Copy reading from the reader + should eventually receive an EOF and exit. + + **However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process + will have a file descriptor pointing to the pipe writer's file desription and it will not close until the external process exits. + + That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever + content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine + and file descriptor (those these will be cleaned up when the process exits). + + We tack on a message to notify the user that they've hit this edgecase and encourage them to address it. + */ + close(interceptor.emergencyBailout) + content = <-interceptor.interceptedContent + BAILOUT_MESSAGE + } + + interceptor.accumulatedOutput += content + interceptor.intercepting = false +} + +func (interceptor *genericOutputInterceptor) Shutdown() { + interceptor.PauseIntercepting() + + if interceptor.stdoutClone != nil { + close(interceptor.shutdown) + interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone) + interceptor.stdoutClone = nil + interceptor.stderrClone = nil + } +} + +/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */ +func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor { + return &genericOutputInterceptor{ + interceptedContent: make(chan string), + pipeChannel: make(chan pipePair), + shutdown: make(chan interface{}), + implementation: &osGlobalReassigningOutputInterceptorImpl{}, + } +} + +type osGlobalReassigningOutputInterceptorImpl struct{} + +func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) { + return os.Stdout, os.Stderr +} + +func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) { + os.Stdout = pipeWriter + os.Stderr = pipeWriter +} + +func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) { + os.Stdout = stdoutClone + os.Stderr = stderrClone +} + +func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) { + //noop +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go new file mode 100644 index 00000000..e875001c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -0,0 +1,62 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package internal + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func NewOutputInterceptor() 
OutputInterceptor { + return &genericOutputInterceptor{ + interceptedContent: make(chan string), + pipeChannel: make(chan pipePair), + shutdown: make(chan interface{}), + implementation: &dupSyscallOutputInterceptorImpl{}, + } +} + +type dupSyscallOutputInterceptorImpl struct{} + +func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) { + // To clone stdout and stderr we: + // First, create two clone file descriptors that point to the stdout and stderr file descriptions + stdoutCloneFD, _ := unix.Dup(1) + stderrCloneFD, _ := unix.Dup(2) + + // And then wrap the clone file descriptors in files. + // One benefit of this (that we don't use yet) is that we can actually write + // to these files to emit output to the console evne though we're intercepting output + stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone") + stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone") + + //these clones remain alive throughout the lifecycle of the suite and don't need to be recreated + //this speeds things up a bit, actually. + return stdoutClone, stderrClone +} + +func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) { + // To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things) + // to the write end of the pipe. + // We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter + // This effectively shunts data written to stdout and stderr to the write end of our pipe + unix.Dup2(int(pipeWriter.Fd()), 1) + unix.Dup2(int(pipeWriter.Fd()), 2) +} + +func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) { + // To restore stdour/stderr from the clones we have the 1 and 2 file descriptors + // point to the original file descriptions that we saved off in the clones. + // This has the added benefit of closing the connection between these descriptors and the write end of the pipe + // which is important to cause the io.Copy on the pipe.Reader to end. 
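// Self-contained sketch of the dup/dup2 trick described above (an assumed demo, not
// the vendored implementation; on platforms without Dup2 the equivalent call is Dup3):
// clone fd 1, point fd 1 at a pipe, write, restore fd 1 from the clone, then read
// back what was captured.
package main

import (
    "fmt"
    "io"
    "os"

    "golang.org/x/sys/unix"
)

func main() {
    stdoutClone, _ := unix.Dup(1) // keep a handle on the real stdout
    r, w, _ := os.Pipe()          // writes to fd 1 will land in this pipe

    unix.Dup2(int(w.Fd()), 1)          // fd 1 now refers to the pipe's write end
    fmt.Println("captured, not shown") // goes into the pipe, not the terminal

    w.Close()                 // drop our direct handle on the write end...
    unix.Dup2(stdoutClone, 1) // ...and restore fd 1, closing its pipe reference
    unix.Close(stdoutClone)   // the clone is no longer needed

    captured, _ := io.ReadAll(r) // reader sees EOF because no write ends remain open
    r.Close()
    fmt.Printf("intercepted: %q\n", captured) // printed to the restored stdout
}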
+ unix.Dup2(int(stdoutClone.Fd()), 1) + unix.Dup2(int(stderrClone.Fd()), 2) +} + +func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) { + // We're done with the clones so we can close them to clean up after ourselves + stdoutClone.Close() + stderrClone.Close() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go new file mode 100644 index 00000000..30c2851a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go @@ -0,0 +1,7 @@ +// +build windows + +package internal + +func NewOutputInterceptor() OutputInterceptor { + return NewOSGlobalReassigningOutputInterceptor() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go new file mode 100644 index 00000000..7d5cb0b6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -0,0 +1,69 @@ +package parallel_support + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type BeforeSuiteState struct { + Data []byte + State types.SpecState +} + +type ParallelIndexCounter struct { + Index int +} + +var ErrorGone = fmt.Errorf("gone") +var ErrorFailed = fmt.Errorf("failed") +var ErrorEarly = fmt.Errorf("early") + +var POLLING_INTERVAL = 50 * time.Millisecond + +type Server interface { + Start() + Close() + Address() string + RegisterAlive(node int, alive func() bool) + GetSuiteDone() chan interface{} + GetOutputDestination() io.Writer + SetOutputDestination(io.Writer) +} + +type Client interface { + Connect() bool + Close() error + + PostSuiteWillBegin(report types.Report) error + PostDidRun(report types.SpecReport) error + PostSuiteDidEnd(report types.Report) error + PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error + BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) + BlockUntilNonprimaryProcsHaveFinished() error + BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) + FetchNextCounter() (int, error) + PostAbort() error + ShouldAbort() bool + Write(p []byte) (int, error) +} + +func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpServer(parallelTotal, reporter) + } else { + return newRPCServer(parallelTotal, reporter) + } +} + +func NewClient(serverHost string) Client { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpClient(serverHost) + } else { + return newRPCClient(serverHost) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go new file mode 100644 index 00000000..d076d5d1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -0,0 +1,152 @@ +package parallel_support + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type httpClient struct { + serverHost string +} + +func newHttpClient(serverHost string) *httpClient { + return &httpClient{ + serverHost: serverHost, + } +} + +func (client *httpClient) Connect() bool { + resp, err := http.Get(client.serverHost + "/up") + if err != nil { + return false + } + resp.Body.Close() + return 
resp.StatusCode == http.StatusOK +} + +func (client *httpClient) Close() error { + return nil +} + +func (client *httpClient) post(path string, data interface{}) error { + var body io.Reader + if data != nil { + encoded, err := json.Marshal(data) + if err != nil { + return err + } + body = bytes.NewBuffer(encoded) + } + resp, err := http.Post(client.serverHost+path, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (client *httpClient) poll(path string, data interface{}) error { + for { + resp, err := http.Get(client.serverHost + path) + if err != nil { + return err + } + if resp.StatusCode == http.StatusTooEarly { + resp.Body.Close() + time.Sleep(POLLING_INTERVAL) + continue + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusGone { + return ErrorGone + } + if resp.StatusCode == http.StatusFailedDependency { + return ErrorFailed + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + if data != nil { + return json.NewDecoder(resp.Body).Decode(data) + } + return nil + } +} + +func (client *httpClient) PostSuiteWillBegin(report types.Report) error { + return client.post("/suite-will-begin", report) +} + +func (client *httpClient) PostDidRun(report types.SpecReport) error { + return client.post("/did-run", report) +} + +func (client *httpClient) PostSuiteDidEnd(report types.Report) error { + return client.post("/suite-did-end", report) +} + +func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.post("/before-suite-completed", beforeSuiteState) +} + +func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("/before-suite-state", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("/have-nonprimary-procs-finished", nil) +} + +func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("/aggregated-nonprimary-procs-report", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *httpClient) FetchNextCounter() (int, error) { + var counter ParallelIndexCounter + err := client.poll("/counter", &counter) + return counter.Index, err +} + +func (client *httpClient) PostAbort() error { + return client.post("/abort", nil) +} + +func (client *httpClient) ShouldAbort() bool { + err := client.poll("/abort", nil) + if err == ErrorGone { + return true + } + return false +} + +func (client *httpClient) Write(p []byte) (int, error) { + resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p)) + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("failed to emit output") + } + return len(p), err +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go 
b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go new file mode 100644 index 00000000..ca1dcdca --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -0,0 +1,214 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "encoding/json" + "io" + "net" + "net/http" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type httpServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &httpServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *httpServer) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin) + mux.HandleFunc("/did-run", server.didRun) + mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) + mux.HandleFunc("/emit-output", server.emitOutput) + + //synchronization endpoints + mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) + mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) + mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) + mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/up", server.handleUp) + mux.HandleFunc("/abort", server.handleAbort) + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *httpServer) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *httpServer) Address() string { + return "http://" + server.listener.Addr().String() +} + +func (server *httpServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *httpServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *httpServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *httpServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { + defer request.Body.Close() + if json.NewDecoder(request.Body).Decode(object) != nil { + writer.WriteHeader(http.StatusBadRequest) + return false + } + return true +} + +func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { + if err == nil { + return false + } + switch err { + case ErrorEarly: + writer.WriteHeader(http.StatusTooEarly) + case ErrorGone: + writer.WriteHeader(http.StatusGone) + case ErrorFailed: + writer.WriteHeader(http.StatusFailedDependency) + default: + writer.WriteHeader(http.StatusInternalServerError) + } + return true +} + +func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) +} + +func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { + var report types.SpecReport + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.DidRun(report, voidReceiver), writer) +} + +func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) +} + +func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { + output, err := io.ReadAll(request.Body) + if err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + var n int + server.handleError(server.handler.EmitOutput(output, &n), writer) +} + +func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if !server.decode(writer, request, &beforeSuiteState) { + return + } + + server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { + return + } + json.NewEncoder(writer).Encode(beforeSuiteState) +} + +func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { + if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) { + return + } + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { + var aggregatedReport types.Report + if 
server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { + return + } + json.NewEncoder(writer).Encode(aggregatedReport) +} + +func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { + var n int + if server.handleError(server.handler.Counter(voidSender, &n), writer) { + return + } + json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) +} + +func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { + if request.Method == "GET" { + var shouldAbort bool + server.handler.ShouldAbort(voidSender, &shouldAbort) + if shouldAbort { + writer.WriteHeader(http.StatusGone) + } else { + writer.WriteHeader(http.StatusOK) + } + } else { + server.handler.Abort(voidSender, voidReceiver) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go new file mode 100644 index 00000000..4e83b097 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -0,0 +1,119 @@ +package parallel_support + +import ( + "net/rpc" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type rpcClient struct { + serverHost string + client *rpc.Client +} + +func newRPCClient(serverHost string) *rpcClient { + return &rpcClient{ + serverHost: serverHost, + } +} + +func (client *rpcClient) Connect() bool { + var err error + if client.client != nil { + return true + } + client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") + if err != nil { + client.client = nil + return false + } + return true +} + +func (client *rpcClient) Close() error { + return client.client.Close() +} + +func (client *rpcClient) poll(method string, data interface{}) error { + for { + err := client.client.Call(method, voidSender, data) + if err == nil { + return nil + } + switch err.Error() { + case ErrorEarly.Error(): + time.Sleep(POLLING_INTERVAL) + case ErrorGone.Error(): + return ErrorGone + case ErrorFailed.Error(): + return ErrorFailed + default: + return err + } + } +} + +func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { + return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) +} + +func (client *rpcClient) PostDidRun(report types.SpecReport) error { + return client.client.Call("Server.DidRun", report, voidReceiver) +} + +func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { + return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver) +} + +func (client *rpcClient) Write(p []byte) (int, error) { + var n int + err := client.client.Call("Server.EmitOutput", p, &n) + return n, err +} + +func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver) +} + +func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("Server.BeforeSuiteState", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client 
*rpcClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver) +} + +func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("Server.AggregatedNonprimaryProcsReport", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *rpcClient) FetchNextCounter() (int, error) { + var counter int + err := client.client.Call("Server.Counter", voidSender, &counter) + return counter, err +} + +func (client *rpcClient) PostAbort() error { + return client.client.Call("Server.Abort", voidSender, voidReceiver) +} + +func (client *rpcClient) ShouldAbort() bool { + var shouldAbort bool + client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort) + return shouldAbort +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go new file mode 100644 index 00000000..2620fd56 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -0,0 +1,75 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "io" + "net" + "net/http" + "net/rpc" + + "github.com/onsi/ginkgo/v2/reporters" +) + +/* +RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type RPCServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &RPCServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *RPCServer) Start() { + rpcServer := rpc.NewServer() + rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server + + httpServer := &http.Server{} + httpServer.Handler = rpcServer + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *RPCServer) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *RPCServer) Address() string { + return server.listener.Addr().String() +} + +func (server *RPCServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *RPCServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *RPCServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *RPCServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go new file mode 100644 index 00000000..ca471cf3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -0,0 +1,202 @@ +package parallel_support + +import ( + "io" + "os" + "sync" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Void struct{} + +var voidReceiver *Void = &Void{} +var voidSender Void + +// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server. +// It handles all the business logic to avoid duplication between the two servers + +type ServerHandler struct { + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool + + numSuiteDidBegins int + numSuiteDidEnds int + aggregatedReport types.Report + reportHoldingArea []types.SpecReport +} + +func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { + return &ServerHandler{ + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + parallelTotal: parallelTotal, + outputDestination: os.Stdout, + done: make(chan interface{}), + } +} + +func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidBegins += 1 + + // all summaries are identical, so it's fine to simply emit the last one of these + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.SuiteWillBegin(report) + + for _, summary := range handler.reportHoldingArea { + handler.reporter.WillRun(summary) + handler.reporter.DidRun(summary) + } + + handler.reportHoldingArea = nil + } + + return nil +} + +func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.WillRun(report) + handler.reporter.DidRun(report) + } else { + handler.reportHoldingArea = append(handler.reportHoldingArea, report) + } + + return nil +} + +func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidEnds += 1 + if handler.numSuiteDidEnds == 1 { + handler.aggregatedReport = report + } else { + handler.aggregatedReport = handler.aggregatedReport.Add(report) + } + + if handler.numSuiteDidEnds == handler.parallelTotal { + handler.reporter.SuiteDidEnd(handler.aggregatedReport) + close(handler.done) + } + + return nil +} + +func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { + 
var err error + *n, err = handler.outputDestination.Write(output) + return err +} + +func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.alives[proc-1] = alive +} + +func (handler *ServerHandler) procIsAlive(proc int) bool { + handler.lock.Lock() + defer handler.lock.Unlock() + alive := handler.alives[proc-1] + if alive == nil { + return true + } + return alive() +} + +func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { + for i := 2; i <= handler.parallelTotal; i++ { + if handler.procIsAlive(i) { + return false + } + } + return true +} + +func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.beforeSuiteState = beforeSuiteState + + return nil +} + +func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.beforeSuiteState.State == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *beforeSuiteState = handler.beforeSuiteState + return nil +} + +func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { + if handler.haveNonprimaryProcsFinished() { + return nil + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { + if handler.haveNonprimaryProcsFinished() { + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.numSuiteDidEnds == handler.parallelTotal-1 { + *report = handler.aggregatedReport + return nil + } else { + return ErrorGone + } + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) Counter(_ Void, counter *int) error { + handler.counterLock.Lock() + defer handler.counterLock.Unlock() + *counter = handler.counter + handler.counter++ + return nil +} + +func (handler *ServerHandler) Abort(_ Void, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.shouldAbort = true + return nil +} + +func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { + handler.lock.Lock() + defer handler.lock.Unlock() + *shouldAbort = handler.shouldAbort + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go new file mode 100644 index 00000000..74199f39 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -0,0 +1,40 @@ +package internal + +import ( + "reflect" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type ReportEntry = types.ReportEntry + +func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) { + out := ReportEntry{ + Visibility: types.ReportEntryVisibilityAlways, + Name: name, + Time: time.Now(), + Location: cl, + } + var didSetValue = false + for _, arg := range args { + switch reflect.TypeOf(arg) { + case reflect.TypeOf(types.ReportEntryVisibilityAlways): + out.Visibility = arg.(types.ReportEntryVisibility) + case reflect.TypeOf(types.CodeLocation{}): + out.Location = arg.(types.CodeLocation) + case reflect.TypeOf(Offset(0)): + out.Location = types.NewCodeLocation(2 + int(arg.(Offset))) + case reflect.TypeOf(out.Time): + out.Time = arg.(time.Time) + default: + if didSetValue { + return ReportEntry{}, 
types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) + } + out.Value = types.WrapEntryValue(arg) + didSetValue = true + } + } + + return out, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go new file mode 100644 index 00000000..92072edd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go @@ -0,0 +1,71 @@ +package internal + +import ( + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +type Spec struct { + Nodes Nodes + Skip bool +} + +func (s Spec) SubjectID() uint { + return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID +} + +func (s Spec) Text() string { + texts := []string{} + for i := range s.Nodes { + if s.Nodes[i].Text != "" { + texts = append(texts, s.Nodes[i].Text) + } + } + return strings.Join(texts, " ") +} + +func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node { + return s.Nodes.FirstNodeWithType(nodeTypes) +} + +func (s Spec) FlakeAttempts() int { + flakeAttempts := 0 + for i := range s.Nodes { + if s.Nodes[i].FlakeAttempts > 0 { + flakeAttempts = s.Nodes[i].FlakeAttempts + } + } + + return flakeAttempts +} + +type Specs []Spec + +func (s Specs) HasAnySpecsMarkedPending() bool { + for i := range s { + if s[i].Nodes.HasNodeMarkedPending() { + return true + } + } + + return false +} + +func (s Specs) CountWithoutSkip() int { + n := 0 + for i := range s { + if !s[i].Skip { + n += 1 + } + } + return n +} + +func (s Specs) AtIndices(indices SpecIndices) Specs { + out := make(Specs, len(indices)) + for i, idx := range indices { + out[i] = s[idx] + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go new file mode 100644 index 00000000..8b0d68ce --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -0,0 +1,634 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Phase uint + +const ( + PhaseBuildTopLevel Phase = iota + PhaseBuildTree + PhaseRun +) + +type Suite struct { + tree *TreeNode + topLevelContainers Nodes + + phase Phase + + suiteNodes Nodes + cleanupNodes Nodes + + failer *Failer + reporter reporters.Reporter + writer WriterInterface + outputInterceptor OutputInterceptor + interruptHandler interrupt_handler.InterruptHandlerInterface + config types.SuiteConfig + + skipAll bool + report types.Report + currentSpecReport types.SpecReport + currentNode Node + + client parallel_support.Client +} + +func NewSuite() *Suite { + return &Suite{ + tree: &TreeNode{}, + phase: PhaseBuildTopLevel, + } +} + +func (suite *Suite) BuildTree() error { + // During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelCotainers and entered + // We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree + suite.phase = PhaseBuildTree + for _, topLevelContainer := range suite.topLevelContainers { + err := suite.PushNode(topLevelContainer) + if err != nil { + return err + } + } + return nil +} + +func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig 
types.SuiteConfig) (bool, bool) { + if suite.phase != PhaseBuildTree { + panic("cannot run before building the tree = call suite.BuildTree() first") + } + ApplyNestedFocusPolicyToTree(suite.tree) + specs := GenerateSpecsFromTreeRoot(suite.tree) + specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) + + suite.phase = PhaseRun + suite.client = client + suite.failer = failer + suite.reporter = reporter + suite.writer = writer + suite.outputInterceptor = outputInterceptor + suite.interruptHandler = interruptHandler + suite.config = suiteConfig + + success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + + return success, hasProgrammaticFocus +} + +/* + Tree Construction methods + + PushNode is used during PhaseBuildTopLevel and PhaseBuildTree +*/ + +func (suite *Suite) PushNode(node Node) error { + if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { + return suite.pushCleanupNode(node) + } + + if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) { + return suite.pushSuiteNode(node) + } + + if suite.phase == PhaseRun { + return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation) + } + + if node.MarkedSerial { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial { + return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType) + } + } + + if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if firstOrderedNode.IsZero() { + return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType) + } + } + + if node.NodeType == types.NodeTypeContainer { + // During PhaseBuildTopLevel we only track the top level containers without entering them + // We only enter the top level container nodes during PhaseBuildTree + // + // This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives + // the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs` + // is invoked. This makes the lifecycle easier to reason about and solves issues like #693. 
+ if suite.phase == PhaseBuildTopLevel { + suite.topLevelContainers = append(suite.topLevelContainers, node) + return nil + } + if suite.phase == PhaseBuildTree { + parentTree := suite.tree + suite.tree = &TreeNode{Node: node} + parentTree.AppendChild(suite.tree) + err := func() (err error) { + defer func() { + if e := recover(); e != nil { + err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) + } + }() + node.Body() + return err + }() + suite.tree = parentTree + return err + } + } else { + suite.tree.AppendChild(&TreeNode{Node: node}) + return nil + } + + return nil +} + +func (suite *Suite) pushSuiteNode(node Node) error { + if suite.phase == PhaseBuildTree { + return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation) + } + + if suite.phase == PhaseRun { + return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation) + } + + switch node.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) + if len(existingBefores) > 0 { + return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation) + } + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) + if len(existingAfters) > 0 { + return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation) + } + } + + suite.suiteNodes = append(suite.suiteNodes, node) + return nil +} + +func (suite *Suite) pushCleanupNode(node Node) error { + if suite.phase != PhaseRun || suite.currentNode.IsZero() { + return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation) + } + + switch suite.currentNode.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + node.NodeType = types.NodeTypeCleanupAfterSuite + case types.NodeTypeBeforeAll, types.NodeTypeAfterAll: + node.NodeType = types.NodeTypeCleanupAfterAll + case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite: + return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType) + case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite: + return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation) + default: + node.NodeType = types.NodeTypeCleanupAfterEach + } + + node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID + node.NestingLevel = suite.currentNode.NestingLevel + suite.cleanupNodes = append(suite.cleanupNodes, node) + + return nil +} + +/* + Spec Running methods - used during PhaseRun +*/ +func (suite *Suite) CurrentSpecReport() types.SpecReport { + report := suite.currentSpecReport + if suite.writer != nil { + report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + } + return report +} + +func (suite *Suite) AddReportEntry(entry ReportEntry) error { + if suite.phase != PhaseRun { + return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) + } + suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) + return nil +} + +func (suite *Suite) 
isRunningInParallel() bool { + return suite.config.ParallelTotal > 1 +} + +func (suite *Suite) processCurrentSpecReport() { + suite.reporter.DidRun(suite.currentSpecReport) + if suite.isRunningInParallel() { + suite.client.PostDidRun(suite.currentSpecReport) + } + suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport) + + if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + suite.report.SuiteSucceeded = false + if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) { + suite.skipAll = true + if suite.isRunningInParallel() { + suite.client.PostAbort() + } + } + } +} + +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { + numSpecsThatWillBeRun := specs.CountWithoutSkip() + + suite.report = types.Report{ + SuitePath: suitePath, + SuiteDescription: description, + SuiteLabels: suiteLabels, + SuiteConfig: suite.config, + SuiteHasProgrammaticFocus: hasProgrammaticFocus, + PreRunStats: types.PreRunStats{ + TotalSpecs: len(specs), + SpecsThatWillRun: numSpecsThatWillBeRun, + }, + StartTime: time.Now(), + } + + suite.reporter.SuiteWillBegin(suite.report) + if suite.isRunningInParallel() { + suite.client.PostSuiteWillBegin(suite.report) + } + + suite.report.SuiteSucceeded = true + suite.runBeforeSuite(numSpecsThatWillBeRun) + + if suite.report.SuiteSucceeded { + groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) + nextIndex := MakeIncrementingIndexCounter() + if suite.isRunningInParallel() { + nextIndex = suite.client.FetchNextCounter + } + + for { + groupedSpecIdx, err := nextIndex() + if err != nil { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error())) + suite.report.SuiteSucceeded = false + break + } + + if groupedSpecIdx >= len(groupedSpecIndices) { + if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 { + groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter() + suite.client.BlockUntilNonprimaryProcsHaveFinished() + continue + } + break + } + + // the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts + // we encapsulate that complexity in the notion of a Group that can run + // Group is really just an extension of suite so it gets passed a suite and has access to all its internals + // Note that group is stateful and intedned for single use! 
+ newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) + } + + if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") + suite.report.SuiteSucceeded = false + } + } + + suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + + interruptStatus := suite.interruptHandler.Status() + if interruptStatus.Interrupted { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String()) + suite.report.SuiteSucceeded = false + } + suite.report.EndTime = time.Now() + suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime) + + if suite.config.ParallelProcess == 1 { + suite.runReportAfterSuite() + } + suite.reporter.SuiteDidEnd(suite.report) + if suite.isRunningInParallel() { + suite.client.PostSuiteDidEnd(suite.report) + } + + return suite.report.SuiteSucceeded +} + +func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { + interruptStatus := suite.interruptHandler.Status() + beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) + if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: beforeSuiteNode.NodeType, + LeafNodeLocation: beforeSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel) + if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite") + suite.skipAll = true + } + suite.processCurrentSpecReport() + } +} + +func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { + afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) + if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: afterSuiteNode.NodeType, + LeafNodeLocation: afterSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel) + suite.processCurrentSpecReport() + } + + afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() + if len(afterSuiteCleanup) > 0 { + for _, cleanupNode := range afterSuiteCleanup { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: cleanupNode.NodeType, + LeafNodeLocation: cleanupNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel) + suite.processCurrentSpecReport() + } + } +} + +func (suite *Suite) runReportAfterSuite() { + for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) { + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: node.NodeType, + LeafNodeLocation: node.CodeLocation, + LeafNodeText: node.Text, + ParallelProcess: suite.config.ParallelProcess, + } + suite.reporter.WillRun(suite.currentSpecReport) + suite.runReportAfterSuiteNode(node, suite.report) + suite.processCurrentSpecReport() + } +} + +func (suite *Suite) 
reportEach(spec Spec, nodeType types.NodeType) { + if suite.config.DryRun { + return + } + + nodes := spec.Nodes.WithType(nodeType) + if nodeType == types.NodeTypeReportAfterEach { + nodes = nodes.SortedByDescendingNestingLevel() + } + if nodeType == types.NodeTypeReportBeforeEach { + nodes = nodes.SortedByAscendingNestingLevel() + } + if len(nodes) == 0 { + return + } + + for i := range nodes { + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + report := suite.currentSpecReport + nodes[i].Body = func() { + nodes[i].ReportEachBody(report) + } + suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, + "{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}", + nodeType, nodeType, nodeType, + nodes[i].CodeLocation, + )) + state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i])) + suite.interruptHandler.ClearInterruptPlaceholderMessage() + // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state. + // Also, if the reporter is every aborted - always override the state to propagate the abort + if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) { + suite.currentSpecReport.State = state + suite.currentSpecReport.Failure = failure + } + suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + } +} + +func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { + if suite.config.DryRun { + suite.currentSpecReport.State = types.SpecStatePassed + return + } + + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + suite.currentSpecReport.StartTime = time.Now() + + var err error + switch node.NodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite: + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + case types.NodeTypeCleanupAfterSuite: + if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 { + err = suite.client.BlockUntilNonprimaryProcsHaveFinished() + } + if err == nil { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + } + case types.NodeTypeSynchronizedBeforeSuite: + var data []byte + var runAllProcs bool + if suite.config.ParallelProcess == 1 { + if suite.config.ParallelTotal > 1 { + suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) + } + node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + if suite.config.ParallelTotal > 1 { + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutput() + if suite.currentSpecReport.State.Is(types.SpecStatePassed) { + err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data) + } else { + err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil) + } + } + 
runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil + } else { + var proc1State types.SpecState + proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData() + switch proc1State { + case types.SpecStatePassed: + runAllProcs = true + case types.SpecStateFailed, types.SpecStatePanicked: + err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1() + case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped: + suite.currentSpecReport.State = proc1State + } + } + if runAllProcs { + node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + } + case types.NodeTypeSynchronizedAfterSuite: + node.Body = node.SynchronizedAfterSuiteAllProcsBody + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + if suite.config.ParallelProcess == 1 { + if suite.config.ParallelTotal > 1 { + err = suite.client.BlockUntilNonprimaryProcsHaveFinished() + } + if err == nil { + if suite.config.ParallelTotal > 1 { + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) + } + + node.Body = node.SynchronizedAfterSuiteProc1Body + state, failure := suite.runNode(node, interruptChannel, "") + if suite.currentSpecReport.State.Is(types.SpecStatePassed) { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure + } + } + } + } + + if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + } + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() + + return +} + +func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { + if suite.config.DryRun { + suite.currentSpecReport.State = types.SpecStatePassed + return + } + + suite.writer.Truncate() + suite.outputInterceptor.StartInterceptingOutput() + suite.currentSpecReport.StartTime = time.Now() + + if suite.config.ParallelTotal > 1 { + aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport() + if err != nil { + suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + return + } + report = report.Add(aggregatedReport) + } + + node.Body = func() { node.ReportAfterSuiteBody(report) } + suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, + "{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. 
To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}", + node.CodeLocation, + )) + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "") + suite.interruptHandler.ClearInterruptPlaceholderMessage() + + suite.currentSpecReport.EndTime = time.Now() + suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) + suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) + suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput() + + return +} + +func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) { + if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { + suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node) + } + + suite.currentNode = node + defer func() { + suite.currentNode = Node{} + }() + + if suite.config.EmitSpecProgress { + if text == "" { + text = "TOP-LEVEL" + } + s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String()) + suite.writer.Write([]byte(s)) + } + + var failure types.Failure + failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation + if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) { + failure.FailureNodeContext = types.FailureNodeIsLeafNode + } else if node.NestingLevel <= 0 { + failure.FailureNodeContext = types.FailureNodeAtTopLevel + } else { + failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1 + } + + outcomeC := make(chan types.SpecState) + failureC := make(chan types.Failure) + + go func() { + finished := false + defer func() { + if e := recover(); e != nil || !finished { + suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e) + } + + outcome, failureFromRun := suite.failer.Drain() + outcomeC <- outcome + failureC <- failureFromRun + }() + + node.Body() + finished = true + }() + + select { + case outcome := <-outcomeC: + failureFromRun := <-failureC + if outcome == types.SpecStatePassed { + return outcome, types.Failure{} + } + failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic + return outcome, failure + case <-interruptChannel: + failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation + return types.SpecStateInterrupted, failure + } +} + +func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure { + return types.Failure{ + Message: message, + Location: node.CodeLocation, + FailureNodeContext: types.FailureNodeIsLeafNode, + FailureNodeType: node.NodeType, + FailureNodeLocation: node.CodeLocation, + } +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go similarity index 56% rename from vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go rename to vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 4dcfaf4c..2f42b264 100644 --- a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go +++ 
b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -3,40 +3,53 @@ package testingtproxy import ( "fmt" "io" + "os" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" ) type failFunc func(message string, callerSkip ...int) type skipFunc func(message string, callerSkip ...int) -type failedFunc func() bool -type nameFunc func() string +type cleanupFunc func(args ...interface{}) +type reportFunc func() types.SpecReport -func New(writer io.Writer, fail failFunc, skip skipFunc, failed failedFunc, name nameFunc, offset int) *ginkgoTestingTProxy { +func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy { return &ginkgoTestingTProxy{ - fail: fail, - offset: offset, - writer: writer, - skip: skip, - failed: failed, - name: name, + fail: fail, + offset: offset, + writer: writer, + skip: skip, + cleanup: cleanup, + report: report, } } type ginkgoTestingTProxy struct { - fail failFunc - skip skipFunc - failed failedFunc - name nameFunc - offset int - writer io.Writer + fail failFunc + skip skipFunc + cleanup cleanupFunc + report reportFunc + offset int + writer io.Writer } -func (t *ginkgoTestingTProxy) Cleanup(func()) { - // No-op +func (t *ginkgoTestingTProxy) Cleanup(f func()) { + t.cleanup(f, internal.Offset(1)) } -func (t *ginkgoTestingTProxy) Setenv(kev, value string) { - fmt.Println("Setenv is a noop for Ginkgo at the moment but will be implemented in V2") - // No-op until Cleanup is implemented +func (t *ginkgoTestingTProxy) Setenv(key, value string) { + originalValue, exists := os.LookupEnv(key) + if exists { + t.cleanup(os.Setenv, key, originalValue, internal.Offset(1)) + } else { + t.cleanup(os.Unsetenv, key, internal.Offset(1)) + } + + err := os.Setenv(key, value) + if err != nil { + t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1) + } } func (t *ginkgoTestingTProxy) Error(args ...interface{}) { @@ -56,7 +69,7 @@ func (t *ginkgoTestingTProxy) FailNow() { } func (t *ginkgoTestingTProxy) Failed() bool { - return t.failed() + return t.report().Failed() } func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { @@ -80,7 +93,7 @@ func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Name() string { - return t.name() + return t.report().FullText() } func (t *ginkgoTestingTProxy) Parallel() { @@ -100,10 +113,16 @@ func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Skipped() bool { - return false + return t.report().State.Is(types.SpecStateSkipped) } func (t *ginkgoTestingTProxy) TempDir() string { - // No-op - return "" + tmpDir, err := os.MkdirTemp("", "ginkgo") + if err != nil { + t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1) + return "" + } + t.cleanup(os.RemoveAll, tmpDir) + + return tmpDir } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/tree.go b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go new file mode 100644 index 00000000..f9d1eeb8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go @@ -0,0 +1,77 @@ +package internal + +import "github.com/onsi/ginkgo/v2/types" + +type TreeNode struct { + Node Node + Parent *TreeNode + Children TreeNodes +} + +func (tn *TreeNode) AppendChild(child *TreeNode) { + tn.Children = append(tn.Children, child) + child.Parent = tn +} + +func (tn *TreeNode) AncestorNodeChain() Nodes { + if tn.Parent == nil || tn.Parent.Node.IsZero() { + return 
Nodes{tn.Node} + } + return append(tn.Parent.AncestorNodeChain(), tn.Node) +} + +type TreeNodes []*TreeNode + +func (tn TreeNodes) Nodes() Nodes { + out := make(Nodes, len(tn)) + for i := range tn { + out[i] = tn[i].Node + } + return out +} + +func (tn TreeNodes) WithID(id uint) *TreeNode { + for i := range tn { + if tn[i].Node.ID == id { + return tn[i] + } + } + + return nil +} + +func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs { + var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs + walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs { + tests := Specs{} + + nodes := make(Nodes, len(trees)) + for i := range trees { + nodes[i] = trees[i].Node + nodes[i].NestingLevel = nestingLevel + } + + for i := range nodes { + if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) { + continue + } + leftNodes, rightNodes := nodes.SplitAround(nodes[i]) + leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt) + rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt) + + leftNodes = lNodes.CopyAppend(leftNodes...) + rightNodes = rightNodes.CopyAppend(rNodes...) + + if nodes[i].NodeType.Is(types.NodeTypeIt) { + tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)}) + } else { + treeNode := trees.WithID(nodes[i].ID) + tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...) + } + } + + return tests + } + + return walkTree(0, Nodes{}, Nodes{}, tree.Children) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go new file mode 100644 index 00000000..b02a90e2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -0,0 +1,103 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +type WriterMode uint + +const ( + WriterModeStreamAndBuffer WriterMode = iota + WriterModeBufferOnly +) + +type WriterInterface interface { + io.Writer + + Truncate() + Bytes() []byte +} + +//Writer impplements WriterInterface and GinkgoWriterInterface +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + mode WriterMode + + teeWriters []io.Writer +} + +func NewWriter(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + mode: WriterModeStreamAndBuffer, + } +} + +func (w *Writer) SetMode(mode WriterMode) { + w.lock.Lock() + defer w.lock.Unlock() + w.mode = mode +} + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + for _, teeWriter := range w.teeWriters { + teeWriter.Write(b) + } + + if w.mode == WriterModeStreamAndBuffer { + w.outWriter.Write(b) + } + return w.buffer.Write(b) +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + +//GinkgoWriterInterface +func (w *Writer) TeeTo(writer io.Writer) { + w.lock.Lock() + defer w.lock.Unlock() + + w.teeWriters = append(w.teeWriters, writer) +} + +func (w *Writer) ClearTeeWriters() { + w.lock.Lock() + defer w.lock.Unlock() + + w.teeWriters = []io.Writer{} +} + +func (w *Writer) Print(a ...interface{}) { + fmt.Fprint(w, a...) +} + +func (w *Writer) Printf(format string, a ...interface{}) { + fmt.Fprintf(w, format, a...) 
+} + +func (w *Writer) Println(a ...interface{}) { + fmt.Fprintln(w, a...) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go new file mode 100644 index 00000000..f39802ff --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -0,0 +1,410 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. + +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "fmt" + "io" + "runtime" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type DefaultReporter struct { + conf types.ReporterConfig + writer io.Writer + + // managing the emission stream + lastChar string + lastEmissionWasDelimiter bool + + // rendering + specDenoter string + retryDenoter string + formatter formatter.Formatter +} + +func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := NewDefaultReporter(conf, writer) + reporter.formatter = formatter.New(formatter.ColorModePassthrough) + + return reporter +} + +func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := &DefaultReporter{ + conf: conf, + writer: writer, + + lastChar: "\n", + lastEmissionWasDelimiter: false, + + specDenoter: "•", + retryDenoter: "↺", + formatter: formatter.NewWithNoColorBool(conf.NoColor), + } + if runtime.GOOS == "windows" { + reporter.specDenoter = "+" + reporter.retryDenoter = "R" + } + + return reporter +} + +/* The Reporter Interface */ + +func (r *DefaultReporter) SuiteWillBegin(report types.Report) { + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) { + r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription)) + if len(report.SuiteLabels) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) + } + r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) + } + } else { + banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath) + r.emitBlock(banner) + bannerWidth := len(banner) + if len(report.SuiteLabels) > 0 { + labels := strings.Join(report.SuiteLabels, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels)) + if len(labels)+2 > bannerWidth { + bannerWidth = len(labels) + 2 + } + } + r.emitBlock(strings.Repeat("=", bannerWidth)) + + out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) + if report.SuiteConfig.RandomizeAllSpecs { + out += r.f(" - will randomize all specs") + } + r.emitBlock(out) + r.emit("\n") + r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal)) + } + } +} + +func (r *DefaultReporter) WillRun(report types.SpecReport) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) { + return + } + + r.emitDelimiter() + indentation := uint(0) + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText)) + } else { 
+ if len(report.ContainerHierarchyTexts) > 0 { + r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " ")) + indentation = 1 + } + line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText) + labels := report.Labels() + if len(labels) > 0 { + line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", ")) + } + r.emitBlock(line) + } + r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation)) +} + +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + var header, highlightColor string + includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter + succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct) + + hasGW := report.CapturedGinkgoWriterOutput != "" + hasStd := report.CapturedStdOutErr != "" + hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose))) + + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + denoter = fmt.Sprintf("[%s]", report.LeafNodeType) + } + + switch report.State { + case types.SpecStatePassed: + highlightColor, succinctLocationBlock = "{{green}}", v.LT(types.VerbosityLevelVerbose) + emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports { + header = fmt.Sprintf("%s PASSED", denoter) + } else { + return + } + } else { + header, stream = denoter, true + if report.NumAttempts > 1 { + header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false + } + if report.RunTime > r.conf.SlowSpecThreshold { + header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false + } + } + if hasStd || emitGinkgoWriterOutput || hasEmittableReports { + stream = false + } + case types.SpecStatePending: + highlightColor = "{{yellow}}" + includeRuntime, emitGinkgoWriterOutput = false, false + if v.Is(types.VerbosityLevelSuccinct) { + header, stream = "P", true + } else { + header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose) + } + case types.SpecStateSkipped: + highlightColor = "{{cyan}}" + if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) { + header = "S [SKIPPED]" + } else { + header, stream = "S", true + } + case types.SpecStateFailed: + highlightColor, header = "{{red}}", fmt.Sprintf("%s [FAILED]", denoter) + case types.SpecStatePanicked: + highlightColor, header = "{{magenta}}", fmt.Sprintf("%s! [PANICKED]", denoter) + case types.SpecStateInterrupted: + highlightColor, header = "{{orange}}", fmt.Sprintf("%s! [INTERRUPTED]", denoter) + case types.SpecStateAborted: + highlightColor, header = "{{coral}}", fmt.Sprintf("%s! 
[ABORTED]", denoter) + } + + // Emit stream and return + if stream { + r.emit(r.f(highlightColor + header + "{{/}}")) + return + } + + // Emit header + r.emitDelimiter() + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + + // Emit Code Location Block + r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false)) + + //Emit Stdout/Stderr Output + if hasStd { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}")) + } + + //Emit Captured GinkgoWriter Output + if emitGinkgoWriterOutput && hasGW { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + r.emitBlock(r.fi(2, "%s", report.CapturedGinkgoWriterOutput)) + r.emitBlock(r.fi(1, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if hasEmittableReports { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}")) + reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) + if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) { + reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose) + } + for _, entry := range reportEntries { + r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(3, representation)) + } + } + r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}")) + } + + // Emit Failure Message + if !report.Failure.IsZero() { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.Message)) + r.emitBlock(r.fi(1, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.Location)) + if report.Failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.ForwardedPanic)) + } + + if r.conf.FullTrace || report.Failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(1, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(2, "%s", report.Failure.Location.FullStackTrace)) + } + } + + r.emitDelimiter() +} + +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 1 { + r.emitBlock("\n\n") + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) + return + } + + r.emitBlock("\n") + color, 
status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" + } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) + } else { + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + } + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + if len(s) > 0 { + r.lastChar = s[len(s)-1:] + r.lastEmissionWasDelimiter = false + r.writer.Write([]byte(s)) + } +} + +func (r *DefaultReporter) emitBlock(s string) { + if len(s) > 0 { + if r.lastChar != "\n" { + r.emit("\n") + } + r.emit(s) + if r.lastChar != "\n" { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitDelimiter() { + if r.lastEmissionWasDelimiter { + return + } + r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30))) + r.lastEmissionWasDelimiter = true +} + +/* Rendering text */ +func (r *DefaultReporter) f(format string, args ...interface{}) string { + return r.formatter.F(format, args...) +} + +func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { + return r.formatter.Fi(indentation, format, args...) +} + +func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { + return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) +} + +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string { + texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} + texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) + } else { + texts = append(texts, report.LeafNodeText) + } + labels = append(labels, report.LeafNodeLabels) + locations = append(locations, report.LeafNodeLocation) + + failureLocation := report.Failure.FailureNodeLocation + if usePreciseFailureLocation { + failureLocation = report.Failure.Location + } + + switch report.Failure.FailureNodeContext { + case types.FailureNodeAtTopLevel: + texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...) + locations = append([]types.CodeLocation{failureLocation}, locations...) + labels = append([][]string{{}}, labels...) + case types.FailureNodeInContainer: + i := report.Failure.FailureNodeContainerIndex + texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType) + locations[i] = failureLocation + case types.FailureNodeIsLeafNode: + i := len(texts) - 1 + texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText) + locations[i] = failureLocation + } + + out := "" + if succinct { + out += r.f("%s", r.cycleJoin(texts, " ")) + flattenedLabels := report.Labels() + if len(flattenedLabels) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) + } + out += "\n" + if usePreciseFailureLocation { + out += r.f("{{gray}}%s{{/}}", failureLocation) + } else { + out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1]) + } + } else { + for i := range texts { + out += r.fi(uint(i), "%s", texts[i]) + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go new file mode 100644 index 00000000..89d30076 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -0,0 +1,149 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters +// this has been removed in V2. +// Please read the documentation at: +// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +// for Ginkgo's new behavior and for a migration path. +type DeprecatedReporter interface { + SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SuiteDidEnd(summary *types.SuiteSummary) +} + +// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and +// calls the custom reporter's methods with appropriately transformed data from the V2 report. +// +// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()` +// +// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new +// reporting support in V2. It will be removed in a future minor version of Ginkgo. 
+func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) { + conf := config.DeprecatedGinkgoConfigType{ + RandomSeed: report.SuiteConfig.RandomSeed, + RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs, + FocusStrings: report.SuiteConfig.FocusStrings, + SkipStrings: report.SuiteConfig.SkipStrings, + FailOnPending: report.SuiteConfig.FailOnPending, + FailFast: report.SuiteConfig.FailFast, + FlakeAttempts: report.SuiteConfig.FlakeAttempts, + EmitSpecProgress: report.SuiteConfig.EmitSpecProgress, + DryRun: report.SuiteConfig.DryRun, + ParallelNode: report.SuiteConfig.ParallelProcess, + ParallelTotal: report.SuiteConfig.ParallelTotal, + SyncHost: report.SuiteConfig.ParallelHost, + StreamHost: report.SuiteConfig.ParallelHost, + } + + summary := &types.DeprecatedSuiteSummary{ + SuiteDescription: report.SuiteDescription, + SuiteID: report.SuitePath, + + NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs, + NumberOfTotalSpecs: report.PreRunStats.TotalSpecs, + NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun, + } + + reporter.SuiteWillBegin(conf, summary) + + for _, spec := range report.SpecReports { + switch spec.LeafNodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.BeforeSuiteDidRun(setupSummary) + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.AfterSuiteDidRun(setupSummary) + case types.NodeTypeIt: + componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{} + componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...) + componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...) 
+ componentTexts = append(componentTexts, spec.LeafNodeText) + componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation) + + specSummary := &types.DeprecatedSpecSummary{ + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + NumberOfSamples: spec.NumAttempts, + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.SpecWillRun(specSummary) + reporter.SpecDidComplete(specSummary) + + switch spec.State { + case types.SpecStatePending: + summary.NumberOfPendingSpecs += 1 + case types.SpecStateSkipped: + summary.NumberOfSkippedSpecs += 1 + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted: + summary.NumberOfFailedSpecs += 1 + case types.SpecStatePassed: + summary.NumberOfPassedSpecs += 1 + if spec.NumAttempts > 1 { + summary.NumberOfFlakedSpecs += 1 + } + } + } + } + + summary.SuiteSucceeded = report.SuiteSucceeded + summary.RunTime = report.RunTime + + reporter.SuiteDidEnd(summary) +} + +func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure { + if spec.Failure.IsZero() { + return types.DeprecatedSpecFailure{} + } + + index := 0 + switch spec.Failure.FailureNodeContext { + case types.FailureNodeInContainer: + index = spec.Failure.FailureNodeContainerIndex + case types.FailureNodeAtTopLevel: + index = -1 + case types.FailureNodeIsLeafNode: + index = len(spec.ContainerHierarchyTexts) - 1 + if spec.LeafNodeText != "" { + index += 1 + } + } + + return types.DeprecatedSpecFailure{ + Message: spec.Failure.Message, + Location: spec.Failure.Location, + ForwardedPanic: spec.Failure.ForwardedPanic, + ComponentIndex: index, + ComponentType: spec.Failure.FailureNodeType, + ComponentCodeLocation: spec.Failure.FailureNodeLocation, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go new file mode 100644 index 00000000..7f96c450 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -0,0 +1,60 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/types" +) + +//GenerateJSONReport produces a JSON-formatted report at the passed in destination +func GenerateJSONReport(report types.Report, destination string) error { + f, err := os.Create(destination) + if err != nil { + return err + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode([]types.Report{ + report, + }) + if err != nil { + return err + } + return f.Close() +} + +//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +//It skips over reports that fail to decode but reports on them via the returned messages []string +func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + allReports := []types.Report{} + for _, source := range sources { + reports := []types.Report{} + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = json.Unmarshal(data, &reports) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + allReports = append(allReports, reports...) 
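As a hedged usage sketch (the node text and the "report.json" path are illustrative), `GenerateJSONReport` can be called from a `ReportAfterSuite` node to persist the final suite report; this mirrors what the `--json-report` CLI flag wires up automatically:

```go
package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

// Persist the aggregated suite report as JSON after every spec has run.
var _ = ReportAfterSuite("write JSON report", func(report Report) {
	if err := reporters.GenerateJSONReport(report, "report.json"); err != nil {
		Fail("failed to generate JSON report: " + err.Error())
	}
})
```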
+ } + + f, err := os.Create(destination) + if err != nil { + return messages, err + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(allReports) + if err != nil { + return messages, err + } + return messages, f.Close() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go new file mode 100644 index 00000000..87556d03 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -0,0 +1,307 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/ + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending and/or skipped + Disabled int `xml:"disabled,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all test suites + Time float64 `xml:"time,attr"` + + //The set of all test suites + TestSuites []JUnitTestSuite `xml:"testsuite"` +} + +type JUnitTestSuite struct { + // Name maps onto the description of the test suite - maps onto Report.SuiteDescription + Name string `xml:"name,attr"` + // Package maps onto the aboslute path to the test suite - maps onto Report.SuitePath + Package string `xml:"package,attr"` + // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending + Disabled int `xml:"disabled,attr"` + // Skiped maps onto specs that are skipped + Skipped int `xml:"skipped,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all the test suite - maps onto Report.RunTime + Time float64 `xml:"time,attr"` + // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime + Timestamp string `xml:"timestamp,attr"` + + //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs + Properties JUnitProperties `xml:"properties"` + + //TestCases capture the individual specs + TestCases []JUnitTestCase `xml:"testcase"` +} + +type JUnitProperties struct { + Properties []JUnitProperty `xml:"property"` +} + +func (jup JUnitProperties) WithName(name string) string { + for _, property := range jup.Properties { + if property.Name == name { + return property.Value + } + } + return "" +} + +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +type JUnitTestCase struct { + // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" + Name string `xml:"name,attr"` + // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription + Classname string `xml:"classname,attr"` + // 
Status maps onto the string representation of SpecReport.State + Status string `xml:"status,attr"` + // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime + Time float64 `xml:"time,attr"` + //Skipped is populated with a message if the test was skipped or pending + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + //Error is populated if the test panicked or was interrupted + Error *JUnitError `xml:"error,omitempty"` + //Failure is populated if the test failed + Failure *JUnitFailure `xml:"failure,omitempty"` + //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr + SystemOut string `xml:"system-out,omitempty"` + //SystemOut maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput + SystemErr string `xml:"system-err,omitempty"` +} + +type JUnitSkipped struct { + // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON) + Message string `xml:"message,attr"` +} + +type JUnitError struct { + //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interupted" + Message string `xml:"message,attr"` + //Type is one of "panicked" or "interrupted" + Type string `xml:"type,attr"` + //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines + Description string `xml:",chardata"` +} + +type JUnitFailure struct { + //Message maps onto the failure message - equivalent to SpecReport.Failure.Message + Message string `xml:"message,attr"` + //Type is "failed" + Type string `xml:"type,attr"` + //Description maps onto the location and stack trace of the failure + Description string `xml:",chardata"` +} + +func GenerateJUnitReport(report types.Report, dst string) error { + suite := JUnitTestSuite{ + Name: report.SuiteDescription, + Package: report.SuitePath, + Time: report.RunTime.Seconds(), + Timestamp: report.StartTime.Format("2006-01-02T15:04:05"), + Properties: JUnitProperties{ + Properties: []JUnitProperty{ + {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)}, + {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, + {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, + {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, + {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, + {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, + {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, + {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, + {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, + {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, + {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, + {"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)}, + {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, + {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, + {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, + }, + }, + } + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", 
spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + test := JUnitTestCase{ + Name: name, + Classname: report.SuiteDescription, + Status: spec.State.String(), + Time: spec.RunTime.Seconds(), + SystemOut: systemOutForUnstructureReporters(spec), + SystemErr: spec.CapturedGinkgoWriterOutput, + } + suite.Tests += 1 + + switch spec.State { + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + test.Skipped = &JUnitSkipped{Message: message} + suite.Skipped += 1 + case types.SpecStatePending: + test.Skipped = &JUnitSkipped{Message: "pending"} + suite.Disabled += 1 + case types.SpecStateFailed: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "failed", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Failures += 1 + case types.SpecStateInterrupted: + test.Error = &JUnitError{ + Message: "interrupted", + Type: "interrupted", + Description: spec.Failure.Message, + } + suite.Errors += 1 + case types.SpecStateAborted: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "aborted", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Errors += 1 + case types.SpecStatePanicked: + test.Error = &JUnitError{ + Message: spec.Failure.ForwardedPanic, + Type: "panicked", + Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + } + suite.Errors += 1 + } + + suite.TestCases = append(suite.TestCases, test) + } + + junitReport := JUnitTestSuites{ + Tests: suite.Tests, + Disabled: suite.Disabled + suite.Skipped, + Errors: suite.Errors, + Failures: suite.Failures, + Time: suite.Time, + TestSuites: []JUnitTestSuite{suite}, + } + + f, err := os.Create(dst) + if err != nil { + return err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(junitReport) + + return f.Close() +} + +func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { + messages := []string{} + mergedReport := JUnitTestSuites{} + for _, source := range sources { + report := JUnitTestSuites{} + f, err := os.Open(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = xml.NewDecoder(f).Decode(&report) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + + mergedReport.Tests += report.Tests + mergedReport.Disabled += report.Disabled + mergedReport.Errors += report.Errors + mergedReport.Failures += report.Failures + mergedReport.Time += report.Time + mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) 
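A similar hedged sketch for JUnit output; the "junit.xml" destination is illustrative, and the `--junit-report` CLI flag produces the same file without any suite code:

```go
package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

// Write a JUnit XML report (consumable by most CI systems) when the suite ends.
var _ = ReportAfterSuite("write JUnit report", func(report Report) {
	if err := reporters.GenerateJUnitReport(report, "junit.xml"); err != nil {
		Fail("failed to generate JUnit report: " + err.Error())
	}
})
```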
+ } + + f, err := os.Create(dst) + if err != nil { + return messages, err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(mergedReport) + + return messages, f.Close() +} + +func systemOutForUnstructureReporters(spec types.SpecReport) string { + systemOut := spec.CapturedStdOutErr + if len(spec.ReportEntries) > 0 { + systemOut += "\nReport Entries:\n" + for i, entry := range spec.ReportEntries { + systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano)) + if representation := entry.StringRepresentation(); representation != "" { + systemOut += representation + "\n" + } + if i+1 < len(spec.ReportEntries) { + systemOut += "--\n" + } + } + } + return systemOut +} + +// Deprecated JUnitReporter (so folks can still compile their suites) +type JUnitReporter struct{} + +func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } +func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} +func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go new file mode 100644 index 00000000..29f84e7c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -0,0 +1,19 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type Reporter interface { + SuiteWillBegin(report types.Report) + WillRun(report types.SpecReport) + DidRun(report types.SpecReport) + SuiteDidEnd(report types.Report) +} + +type NoopReporter struct{} + +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go new file mode 100644 index 00000000..2aa2f184 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -0,0 +1,97 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +func tcEscape(s string) string { + s = strings.Replace(s, "|", "||", -1) + s = strings.Replace(s, "'", "|'", -1) + s = strings.Replace(s, "\n", "|n", -1) + s = strings.Replace(s, "\r", "|r", -1) + s = strings.Replace(s, "[", "|[", -1) + s = strings.Replace(s, "]", "|]", -1) + return s +} + +func GenerateTeamcityReport(report types.Report, dst string) error { + f, err := os.Create(dst) + if err != nil { + return err + } + + name := report.SuiteDescription + labels := report.SuiteLabels + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", 
spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + name = tcEscape(name) + fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) + switch spec.State { + case types.SpecStatePending: + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) + case types.SpecStateFailed: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStatePanicked: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateInterrupted: + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted' details='%s']\n", name, tcEscape(spec.Failure.Message)) + case types.SpecStateAborted: + details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + } + + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructureReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(spec.CapturedGinkgoWriterOutput)) + fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) + } + fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) + + return f.Close() +} + +func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { + messages := []string{} + merged := []byte{} + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + merged = append(merged, data...) + } + return messages, os.WriteFile(dst, merged, 0666) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go new file mode 100644 index 00000000..b35a6ed8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -0,0 +1,153 @@ +package ginkgo + +import ( + "fmt" + "strings" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/internal/global" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +Report represents the report for a Suite. +It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report +*/ +type Report = types.Report + +/* +Report represents the report for a Spec. +It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport +*/ +type SpecReport = types.SpecReport + +/* +CurrentSpecReport returns information about the current running spec. 
+The returned object is a types.SpecReport which includes helper methods +to make extracting information about the spec easier. + +You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport +You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec +*/ +func CurrentSpecReport() SpecReport { + return global.Suite.CurrentSpecReport() +} + +/* + ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter + +- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted. +- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior). +- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`). + +You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports +*/ +type ReportEntryVisibility = types.ReportEntryVisibility + +const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever + +/* +AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport. +It can take any of the following arguments: + - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted. + - A ReportEntryVisibility enum to control the visibility of the ReportEntry + - An Offset or CodeLocation decoration to control the reported location of the ReportEntry + +If the Value object implements `fmt.Stringer`, it's `String()` representation is used when emitting to the console. + +AddReportEntry() must be called within a Subject or Setup node - not in a Container node. + +You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports +*/ +func AddReportEntry(name string, args ...interface{}) { + cl := types.NewCodeLocation(1) + reportEntry, err := internal.NewReportEntry(name, cl, args...) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1) + } + err = global.Suite.AddReportEntry(reportEntry) + if err != nil { + Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1) + } +} + +/* +ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that +receives a SpecReport. They are called before the spec starts. + +You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. +You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically +*/ +func ReportBeforeEach(body func(SpecReport)) bool { + return pushNode(internal.NewReportBeforeEachNode(body, types.NewCodeLocation(1))) +} + +/* +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that +receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. + +You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. 
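A small, hedged sketch of attaching data to a spec report with `AddReportEntry`; the spec itself is illustrative, and `time.Duration` is used because it implements `fmt.Stringer` and therefore renders cleanly when the entry is emitted to the console:

```go
package mysuite_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("report entries", func() {
	It("attaches timing data to the spec report", func() {
		start := time.Now()
		// ... exercise the code under test ...

		// Emitted to the console only on failure or with -v, but always included
		// in machine-readable reports.
		AddReportEntry("elapsed", time.Since(start), ReportEntryVisibilityFailureOrVerbose)
	})
})
```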
+You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically +*/ +func ReportAfterEach(body func(SpecReport)) bool { + return pushNode(internal.NewReportAfterEachNode(body, types.NewCodeLocation(1))) +} + +/* +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. + +They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. +ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) + +When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across +all parallel nodes + +In addition to using ReportAfterSuite to programatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. + +You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. +You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically +You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports +*/ +func ReportAfterSuite(text string, body func(Report)) bool { + return pushNode(internal.NewReportAfterSuiteNode(text, body, types.NewCodeLocation(1))) +} + +func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { + body := func(report Report) { + if reporterConfig.JSONReport != "" { + err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error())) + } + } + if reporterConfig.JUnitReport != "" { + err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error())) + } + } + if reporterConfig.TeamcityReport != "" { + err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error())) + } + } + } + + flags := []string{} + if reporterConfig.JSONReport != "" { + flags = append(flags, "--json-report") + } + if reporterConfig.JUnitReport != "" { + flags = append(flags, "--junit-report") + } + if reporterConfig.TeamcityReport != "" { + flags = append(flags, "--teamcity-report") + } + pushNode(internal.NewReportAfterSuiteNode( + fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), + body, + types.NewCustomCodeLocation("autogenerated by Ginkgo"), + )) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go new file mode 100644 index 00000000..d3029f15 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -0,0 +1,263 @@ +package ginkgo + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" +) + +/* +The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via: + + fmt.Sprintf(formatString, parameters...) + +where parameters are the parameters passed into the entry. 
+ +When passed into an Entry the EntryDescription is used to generate the name or that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions. + +You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions +*/ +type EntryDescription string + +func (ed EntryDescription) render(args ...interface{}) string { + return fmt.Sprintf(string(ed), args...) +} + +/* +DescribeTable describes a table-driven spec. + +For example: + + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) + +You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs +And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns +*/ +func DescribeTable(description string, args ...interface{}) bool { + generateTable(description, args...) + return true +} + +/* +You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. +*/ +func FDescribeTable(description string, args ...interface{}) bool { + args = append(args, internal.Focus) + generateTable(description, args...) + return true +} + +/* +You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. +*/ +func PDescribeTable(description string, args ...interface{}) bool { + args = append(args, internal.Pending) + generateTable(description, args...) + return true +} + +/* +You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. +*/ +var XDescribeTable = PDescribeTable + +/* +TableEntry represents an entry in a table test. You generally use the `Entry` constructor. +*/ +type TableEntry struct { + description interface{} + decorations []interface{} + parameters []interface{} + codeLocation types.CodeLocation +} + +/* +Entry constructs a TableEntry. + +The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description. +Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table. + +Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in. + +You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs +*/ +func Entry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can focus a particular entry with FEntry. This is equivalent to FIt. +*/ +func FEntry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + decorations = append(decorations, internal.Focus) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can mark a particular entry as pending with PEntry. This is equivalent to PIt. 
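A hedged sketch combining `DescribeTable`, a table-level `EntryDescription`, and `Entry`; entries that pass `nil` as their description pick up names generated from the format string, while an explicit string description is used as-is:

```go
package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = DescribeTable("integer comparison",
	func(x, y int, expected bool) {
		if (x > y) != expected {
			Fail("unexpected comparison result")
		}
	},
	// Entries with a nil description are named via this format string,
	// rendered with each entry's parameters.
	EntryDescription("x=%d, y=%d, expect %t"),
	Entry(nil, 1, 0, true),
	Entry(nil, 0, 0, false),
	Entry("an explicit name takes precedence", 0, 1, false),
)
```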
+*/ +func PEntry(description interface{}, args ...interface{}) TableEntry { + decorations, parameters := internal.PartitionDecorations(args...) + decorations = append(decorations, internal.Pending) + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} +} + +/* +You can mark a particular entry as pending with XEntry. This is equivalent to XIt. +*/ +var XEntry = PEntry + +func generateTable(description string, args ...interface{}) { + cl := types.NewCodeLocation(2) + containerNodeArgs := []interface{}{cl} + + entries := []TableEntry{} + var itBody interface{} + + var tableLevelEntryDescription interface{} + tableLevelEntryDescription = func(args ...interface{}) string { + out := []string{} + for _, arg := range args { + out = append(out, fmt.Sprint(arg)) + } + return "Entry: " + strings.Join(out, ", ") + } + + for _, arg := range args { + switch t := reflect.TypeOf(arg); { + case t == reflect.TypeOf(TableEntry{}): + entries = append(entries, arg.(TableEntry)) + case t == reflect.TypeOf([]TableEntry{}): + entries = append(entries, arg.([]TableEntry)...) + case t == reflect.TypeOf(EntryDescription("")): + tableLevelEntryDescription = arg.(EntryDescription).render + case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): + tableLevelEntryDescription = arg + case t.Kind() == reflect.Func: + if itBody != nil { + exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) + } + itBody = arg + default: + containerNodeArgs = append(containerNodeArgs, arg) + } + } + + containerNodeArgs = append(containerNodeArgs, func() { + for _, entry := range entries { + var err error + entry := entry + var description string + switch t := reflect.TypeOf(entry.description); { + case t == nil: + err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation) + if err == nil { + description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String() + } + case t == reflect.TypeOf(EntryDescription("")): + description = entry.description.(EntryDescription).render(entry.parameters...) + case t == reflect.TypeOf(""): + description = entry.description.(string) + case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): + err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation) + if err == nil { + description = invokeFunction(entry.description, entry.parameters)[0].String() + } + default: + err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) + } + + if err == nil { + err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation) + } + itNodeArgs := []interface{}{entry.codeLocation} + itNodeArgs = append(itNodeArgs, entry.decorations...) 
+ itNodeArgs = append(itNodeArgs, func() { + if err != nil { + panic(err) + } + invokeFunction(itBody, entry.parameters) + }) + + pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) + } + }) + + pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) +} + +func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value { + inValues := make([]reflect.Value, len(parameters)) + + funcType := reflect.TypeOf(function) + limit := funcType.NumIn() + if funcType.IsVariadic() { + limit = limit - 1 + } + + for i := 0; i < limit && i < len(parameters); i++ { + inValues[i] = computeValue(parameters[i], funcType.In(i)) + } + + if funcType.IsVariadic() { + variadicType := funcType.In(limit).Elem() + for i := limit; i < len(parameters); i++ { + inValues[i] = computeValue(parameters[i], variadicType) + } + } + + return reflect.ValueOf(function).Call(inValues) +} + +func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation) error { + funcType := reflect.TypeOf(function) + limit := funcType.NumIn() + if funcType.IsVariadic() { + limit = limit - 1 + } + if len(parameters) < limit { + return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl) + } + if len(parameters) > limit && !funcType.IsVariadic() { + return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl) + } + var i = 0 + for ; i < limit; i++ { + actual := reflect.TypeOf(parameters[i]) + expected := funcType.In(i) + if !(actual == nil) && !actual.AssignableTo(expected) { + return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl) + } + } + if funcType.IsVariadic() { + expected := funcType.In(limit).Elem() + for ; i < len(parameters); i++ { + actual := reflect.TypeOf(parameters[i]) + if !(actual == nil) && !actual.AssignableTo(expected) { + return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl) + } + } + } + + return nil +} + +func computeValue(parameter interface{}, t reflect.Type) reflect.Value { + if parameter == nil { + return reflect.Zero(t) + } else { + return reflect.ValueOf(parameter) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go new file mode 100644 index 00000000..00107d3a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -0,0 +1,92 @@ +package types + +import ( + "fmt" + "os" + "regexp" + "runtime" + "runtime/debug" + "strings" +) + +type CodeLocation struct { + FileName string `json:",omitempty"` + LineNumber int `json:",omitempty"` + FullStackTrace string `json:",omitempty"` + CustomMessage string `json:",omitempty"` +} + +func (codeLocation CodeLocation) String() string { + if codeLocation.CustomMessage != "" { + return codeLocation.CustomMessage + } + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} + +func (codeLocation CodeLocation) ContentsOfLine() string { + if codeLocation.CustomMessage != "" { + return "" + } + contents, err := os.ReadFile(codeLocation.FileName) + if err != nil { + return "" + } + lines := strings.Split(string(contents), "\n") + if len(lines) < codeLocation.LineNumber { + return "" + } + return lines[codeLocation.LineNumber-1] +} + +func NewCustomCodeLocation(message string) CodeLocation { + return CodeLocation{ + CustomMessage: message, + } +} + +func 
NewCodeLocation(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + return CodeLocation{FileName: file, LineNumber: line} +} + +func NewCodeLocationWithStackTrace(skip int) CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip+1) + return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // the odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { + prunedStack = stack + } else { + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. + if !re.Match([]byte(stack[i*2+1])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go new file mode 100644 index 00000000..8ebd329e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -0,0 +1,723 @@ +/* +Ginkgo accepts a number of configuration options. 
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) +*/ + +package types + +import ( + "flag" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +// Configuration controlling how an individual test suite is run +type SuiteConfig struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusStrings []string + SkipStrings []string + FocusFiles []string + SkipFiles []string + LabelFilter string + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + Timeout time.Duration + OutputInterceptorMode string + + ParallelProcess int + ParallelTotal int + ParallelHost string +} + +func NewDefaultSuiteConfig() SuiteConfig { + return SuiteConfig{ + RandomSeed: time.Now().Unix(), + Timeout: time.Hour, + ParallelProcess: 1, + ParallelTotal: 1, + } +} + +type VerbosityLevel uint + +const ( + VerbosityLevelSuccinct VerbosityLevel = iota + VerbosityLevelNormal + VerbosityLevelVerbose + VerbosityLevelVeryVerbose +) + +func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { + return vl > comp +} + +func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { + return vl >= comp +} + +func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { + return vl == comp +} + +func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { + return vl <= comp +} + +func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { + return vl < comp +} + +// Configuration for Ginkgo's reporter +type ReporterConfig struct { + NoColor bool + SlowSpecThreshold time.Duration + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + AlwaysEmitGinkgoWriter bool + + JSONReport string + JUnitReport string + TeamcityReport string +} + +func (rc ReporterConfig) Verbosity() VerbosityLevel { + if rc.Succinct { + return VerbosityLevelSuccinct + } else if rc.Verbose { + return VerbosityLevelVerbose + } else if rc.VeryVerbose { + return VerbosityLevelVeryVerbose + } + return VerbosityLevelNormal +} + +func (rc ReporterConfig) WillGenerateReport() bool { + return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" +} + +func NewDefaultReporterConfig() ReporterConfig { + return ReporterConfig{ + SlowSpecThreshold: 5 * time.Second, + } +} + +// Configuration for the Ginkgo CLI +type CLIConfig struct { + //for build, run, and watch + Recurse bool + SkipPackage string + RequireSuite bool + NumCompilers int + + //for run and watch only + Procs int + Parallel bool + AfterRunHook string + OutputDir string + KeepSeparateCoverprofiles bool + KeepSeparateReports bool + + //for run only + KeepGoing bool + UntilItFails bool + Repeat int + RandomizeSuites bool + + //for watch only + Depth int + WatchRegExp string +} + +func NewDefaultCLIConfig() CLIConfig { + return CLIConfig{ + Depth: 1, + WatchRegExp: `\.go$`, + } +} + +func (g CLIConfig) ComputedProcs() int { + if g.Procs > 0 { + return g.Procs + } + + n := 1 + if g.Parallel { + n = runtime.NumCPU() + if n > 4 { + n = n - 1 + } + } + return n +} + +func (g CLIConfig) ComputedNumCompilers() int { + if g.NumCompilers > 0 { + return g.NumCompilers + } + + return runtime.NumCPU() +} + +// Configuration for the Ginkgo CLI capturing available go flags +// A subset of Go flags are exposed by Ginkgo. Some are avaiable at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). 
+// More details can be found at: +// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ +type GoFlagsConfig struct { + //build-time flags for code-and-performance analysis + Race bool + Cover bool + CoverMode string + CoverPkg string + Vet string + + //run-time flags for code-and-performance analysis + BlockProfile string + BlockProfileRate int + CoverProfile string + CPUProfile string + MemProfile string + MemProfileRate int + MutexProfile string + MutexProfileFraction int + Trace string + + //build-time flags for building + A bool + ASMFlags string + BuildMode string + Compiler string + GCCGoFlags string + GCFlags string + InstallSuffix string + LDFlags string + LinkShared bool + Mod string + N bool + ModFile string + ModCacheRW bool + MSan bool + PkgDir string + Tags string + TrimPath bool + ToolExec string + Work bool + X bool +} + +func NewDefaultGoFlagsConfig() GoFlagsConfig { + return GoFlagsConfig{} +} + +func (g GoFlagsConfig) BinaryMustBePreserved() bool { + return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" +} + +// Configuration that were deprecated in 2.0 +type deprecatedConfig struct { + DebugParallel bool + NoisySkippings bool + NoisyPendings bool + RegexScansFilePath bool + SlowSpecThresholdWithFLoatUnits float64 + Stream bool + Notify bool +} + +// Flags + +// Flags sections used by both the CLI and the Ginkgo test process +var FlagSections = GinkgoFlagSections{ + {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, + {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, + {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, + {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", + Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, + {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, + {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, + {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", + Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, + {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, + {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, + {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, + Description: "These flags are inherited from go build. 
Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, +} + +// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI +var SuiteConfigFlags = GinkgoFlags{ + {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", + Usage: "The seed used to randomize the spec suite."}, + {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, + + {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, + {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, + {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + + {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, + {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug", + Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."}, + {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", + Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", + Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, + + {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expresions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will only run specs in matching files. 
Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."}, + + {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, +} + +// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) +var ParallelConfigFlags = GinkgoFlags{ + {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, + {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "The total number of worker processes. For running specs in parallel."}, + {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", + Usage: "The address for the server that will synchronize the processes."}, +} + +// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI +var ReporterConfigFlags = GinkgoFlags{ + {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, suppress color output in default reporter."}, + {KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s", + Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."}, + {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", + Usage: "If set, emits more output including GinkgoWriter contents."}, + {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", + Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, + {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", + Usage: "If set, default reporter prints out a very succinct report"}, + {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", + Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, + {KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed", + Usage: "If set, default reporter prints out captured output of passed tests."}, + + {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", + Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, + {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, + + {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: 
"changed--slowspecthreshold", + Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, + {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, +} + +// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process +func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) + flags = flags.WithPrefix("ginkgo") + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "D": &deprecatedConfig{}, + } + extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} + + return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) +} + +// VetConfig validates that the Ginkgo test process' configuration is sound +func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { + errors := []error{} + + if flagSet.WasSet("count") || flagSet.WasSet("test.count") { + flag := flagSet.Lookup("count") + if flag == nil { + flag = flagSet.Lookup("test.count") + } + count, err := strconv.Atoi(flag.Value.String()) + if err != nil || count != 1 { + errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) + } + } + + if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { + errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) + } + + if suiteConfig.ParallelTotal < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) + } + + if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) + } + + if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { + errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) + } + + if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { + errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) + } + + if len(suiteConfig.FocusFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.FocusFiles) + if err != nil { + errors = append(errors, err) + } + } + + if len(suiteConfig.SkipFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.SkipFiles) + if err != nil { + errors = append(errors, err) + } + } + + if suiteConfig.LabelFilter != "" { + _, err := ParseLabelFilter(suiteConfig.LabelFilter) + if err != nil { + errors = append(errors, err) + } + } + + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "", "dup", "swap", "none": + default: + errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) + } + + numVerbosity := 0 + for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { + if v { + numVerbosity++ + } + } + if numVerbosity > 1 { + errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) + } + + return errors +} + +// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands +var GinkgoCLISharedFlags = GinkgoFlags{ + {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", + Usage: "If 
set, ginkgo finds and runs test suites under the current directory recursively."}, + {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "comma-separated list of packages", + Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."}, + {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, + {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", + Usage: "When running multiple packages, the number of concurrent compilations to perform."}, +} + +// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) +var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ + {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "The number of parallel test nodes to run."}, + {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "--nodes is an alias for --procs"}, + {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", + Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, + {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Command to run when a test suite completes."}, + {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", + Usage: "A location to place all generated profiles and reports."}, + {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", + Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."}, + {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", + Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. 
The reports will remain in their respective package directories or in -output-dir if set."}, + + {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands +var GinkgoCLIRunFlags = GinkgoFlags{ + {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, + {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, + {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", + Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."}, + {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize the order in which test suites run."}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands +var GinkgoCLIWatchFlags = GinkgoFlags{ + {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", + Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, + {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "Regular Expression", + UsageDefaultValue: `\.go$`, + Usage: "Only files matching this regular expression will be watched for changes."}, +} + +// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI +var GoBuildFlags = GinkgoFlags{ + {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", + Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", + Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, + {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", + Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, + {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", + Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, + + {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", + Usage: "force rebuilding of packages that are already up-to-date."}, + {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool asm invocation."}, + {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", + Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", + Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, + {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each gccgo compiler/linker invocation."}, + {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool compile invocation."}, + {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", + Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, + {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool link invocation."}, + {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", + Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, + {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", + Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."}, + {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", + Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, + {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", + Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. 
When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, + {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", + Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, + {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", + Usage: "print the commands but do not run them."}, + {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", + Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, + {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", + Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, + {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", + Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, + {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", + Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."}, + {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", + Usage: "print the name of the temporary work directory and do not delete it when exiting."}, + {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", + Usage: "print the commands."}, +} + +// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI +var GoRunFlags = GinkgoFlags{ + {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`}, + {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a CPU profile to the specified file before exiting. 
Preserves test binary.`}, + {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`}, + {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`}, + {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis", + Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`}, + {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis", + Usage: `Write an execution trace to the specified file before exiting.`}, +} + +// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound +// It returns a potentially mutated copy of the config that rationalizes the configuraiton to ensure consistency for downstream consumers +func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { + errors := []error{} + + if cliConfig.Repeat > 0 && cliConfig.UntilItFails { + errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) + } + + //initialize the output directory + if cliConfig.OutputDir != "" { + err := os.MkdirAll(cliConfig.OutputDir, 0777) + if err != nil { + errors = append(errors, err) + } + } + + //ensure cover mode is configured appropriately + if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" { + goFlagsConfig.CoverProfile = "coverprofile.out" + } + + return cliConfig, goFlagsConfig, errors +} + +// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { + // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure + // the built test binary can generate a coverprofile + if goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + + args := []string{"test", "-c", "-o", destination, packageToBuild} + goArgs, err := GenerateFlagArgs( + GoBuildFlags, + map[string]interface{}{ + "Go": &goFlagsConfig, + }, + ) + + if err != nil { + return []string{}, err + } + args = append(args, goArgs...) + return args, nil +} + +// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary +func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) { + var flags GinkgoFlags + flags = SuiteConfigFlags.WithPrefix("ginkgo") + flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) 
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) + bindings := map[string]interface{}{ + "S": &suiteConfig, + "R": &reporterConfig, + "Go": &goFlagsConfig, + } + + return GenerateFlagArgs(flags, bindings) +} + +// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary +func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { + flags := GoRunFlags.WithPrefix("test") + bindings := map[string]interface{}{ + "Go": &goFlagsConfig, + } + + args, err := GenerateFlagArgs(flags, bindings) + if err != nil { + return args, err + } + args = append(args, "--test.v") + return args, nil +} + +// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command +func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIRunFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command +func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIWatchFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command +func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags + flags = flags.CopyAppend(GoBuildFlags...) 
+ + bindings := map[string]interface{}{ + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Building Multiple Suites" + } + if flagSections[i].Key == "go-build" { + flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags", + Description: "These flags are inherited from go build."} + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} + +func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") + + bindings := map[string]interface{}{ + "C": cliConfig, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Fetching Labels from Multiple Suites" + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go new file mode 100644 index 00000000..17922304 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "time" +) + +/* + A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. +*/ + +type SuiteSummary = DeprecatedSuiteSummary +type SetupSummary = DeprecatedSetupSummary +type SpecSummary = DeprecatedSpecSummary +type SpecMeasurement = DeprecatedSpecMeasurement +type SpecComponentType = NodeType +type SpecFailure = DeprecatedSpecFailure + +var ( + SpecComponentTypeInvalid = NodeTypeInvalid + SpecComponentTypeContainer = NodeTypeContainer + SpecComponentTypeIt = NodeTypeIt + SpecComponentTypeBeforeEach = NodeTypeBeforeEach + SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach + SpecComponentTypeAfterEach = NodeTypeAfterEach + SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach + SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite + SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite + SpecComponentTypeAfterSuite = NodeTypeAfterSuite + SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite +) + +type DeprecatedSuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type DeprecatedSetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type DeprecatedSpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*DeprecatedSpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s DeprecatedSpecSummary) HasFailureState() bool { + return s.State.Is(SpecStateFailureStates) +} + +func (s DeprecatedSpecSummary) TimedOut() bool { + return false +} + +func (s 
DeprecatedSpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s DeprecatedSpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s DeprecatedSpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s DeprecatedSpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s DeprecatedSpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type DeprecatedSpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type DeprecatedSpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s DeprecatedSpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} diff --git a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go similarity index 66% rename from vendor/github.com/onsi/ginkgo/types/deprecation_support.go rename to vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go index d5a6658f..79ca4593 100644 --- a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -6,8 +6,7 @@ import ( "strings" "unicode" - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/formatter" + "github.com/onsi/ginkgo/v2/formatter" ) type Deprecation struct { @@ -22,20 +21,12 @@ var Deprecations = deprecations{} func (d deprecations) CustomReporter() Deprecation { return Deprecation{ - Message: "You are using a custom reporter. Support for custom reporters will likely be removed in V2. Most users were using them to generate junit or teamcity reports and this functionality will be merged into the core reporter. In addition, Ginkgo 2.0 will support emitting a JSON-formatted report that users can then manipulate to generate custom reports.\n\n{{red}}{{bold}}If this change will be impactful to you please leave a comment on {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}", + Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", DocLink: "removed-custom-reporters", Version: "1.16.0", } } -func (d deprecations) V1Reporter() Deprecation { - return Deprecation{ - Message: "You are using a V1 Ginkgo Reporter. Please update your custom reporter to the new V2 Reporter interface.", - DocLink: "changed-reporter-interface", - Version: "1.16.0", - } -} - func (d deprecations) Async() Deprecation { return Deprecation{ Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", @@ -56,7 +47,15 @@ func (d deprecations) ParallelNode() Deprecation { return Deprecation{ Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. 
Please use GinkgoParallelProcess instead.", DocLink: "renamed-ginkgoparallelnode", - Version: "1.16.5", + Version: "1.16.4", + } +} + +func (d deprecations) CurrentGinkgoTestDescription() Deprecation { + return Deprecation{ + Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", + DocLink: "changed-currentginkgotestdescription", + Version: "1.16.0", } } @@ -75,6 +74,14 @@ func (d deprecations) Blur() Deprecation { } } +func (d deprecations) Nodot() Deprecation { + return Deprecation{ + Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.", + DocLink: "removed-ginkgo-nodot", + Version: "1.16.0", + } +} + type DeprecationTracker struct { deprecations map[Deprecation][]CodeLocation } @@ -107,25 +114,19 @@ func (d *DeprecationTracker) DidTrackDeprecations() bool { } func (d *DeprecationTracker) DeprecationsReport() string { - out := formatter.F("\n{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") out += formatter.F("{{light-yellow}}============================================={{/}}\n") - out += formatter.F("{{bold}}{{green}}Ginkgo 2.0{{/}} is under active development and will introduce several new features, improvements, and a small handful of breaking changes.\n") - out += formatter.F("A release candidate for 2.0 is now available and 2.0 should GA in Fall 2021. {{bold}}Please give the RC a try and send us feedback!{{/}}\n") - out += formatter.F(" - To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md{{/}}\n") - out += formatter.F(" - For instructions on using the Release Candidate visit {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta{{/}}\n") - out += formatter.F(" - To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n") - for deprecation, locations := range d.deprecations { out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") if deprecation.DocLink != "" { - out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink) + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) } for _, location := range locations { out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) } } out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") - out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", config.VERSION) + out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) return out } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go new file mode 100644 index 00000000..1d96ae02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go @@ -0,0 +1,43 @@ +package types + +import "encoding/json" + +type EnumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func NewEnumSupport(toString map[uint]string) EnumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + 
toEnum[v] = k + if maxEnum < k { + maxEnum = k + } + } + return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es EnumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go new file mode 100644 index 00000000..6873f74d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -0,0 +1,515 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoError struct { + Heading string + Message string + DocLink string + CodeLocation CodeLocation +} + +func (g GinkgoError) Error() string { + out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) + if (g.CodeLocation != CodeLocation{}) { + contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") + if contentsOfLine != "" { + out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) + } + out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) + } + if g.Message != "" { + out += formatter.Fiw(1, formatter.COLS, g.Message) + out += "\n\n" + } + if g.DocLink != "" { + out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) + } + + return out +} + +type ginkgoErrors struct{} + +var GinkgoErrors = ginkgoErrors{} + +func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { + return GinkgoError{ + Heading: "Your Test Panicked", + Message: `When you, or your assertion library, calls Ginkgo's Fail(), +Ginkgo panics to prevent subsequent assertions from running. + +Normally Ginkgo rescues this panic so you shouldn't see it. + +However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. + +Alternatively, you may have made an assertion outside of a Ginkgo +leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to +an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, + DocLink: "mental-model-how-ginkgo-handles-failure", + CodeLocation: cl, + } +} + +func (g ginkgoErrors) RerunningSuite() error { + return GinkgoError{ + Heading: "Rerunning Suite", + Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), + DocLink: "repeating-spec-runs-and-managing-flaky-specs", + } +} + +/* Tree construction errors */ + +func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node +to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. 
+ +To enable randomization and parallelization Ginkgo requires the spec tree +to be fully constructed up front. In practice, this means that you can +only create nodes like {{bold}}[%s]{{/}} at the top-level or within the +body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", + } +} + +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { + return GinkgoError{ + Heading: "Assertion or Panic detected during tree construction", + Message: formatter.F( + `Ginkgo detected a panic while constructing the spec tree. +You may be trying to make an assertion in the body of a container node +(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). + +Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, +{{bold}}It{{/}}, etc. + +{{bold}}Here's the content of the panic that was caught:{{/}} +%v`, caughtPanic), + CodeLocation: cl, + DocLink: "no-assertions-in-container-nodes", + } +} + +func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node but +you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}.
+ +Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), + CodeLocation: cl, + DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", + } +} + +/* Decorator errors */ +func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { + return GinkgoError{ + Heading: "Invalid Decorator", + Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: Focused and Pending", + Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { + return GinkgoError{ + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. +You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Multiple Functions", + Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but more than one was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Missing Functions", + Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but none was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +/* Ordered Container errors */ +func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Serial Node in Non-Serial Ordered Container", + Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Setup Node not in Ordered Container", + Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +/* DeferCleanup errors */ +func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup requires a valid function", + Message: "You must pass DeferCleanup a function to invoke.
This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup must be called inside a setup or subject node", + Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup cannot be called in a DeferCleanup callback", + Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +/* ReportEntry errors */ +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { + return GinkgoError{ + Heading: "Too Many ReportEntry Values", + Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +/* FileFilter and SkipFilter errors */ +func (g ginkgoErrors) InvalidFileFilter(filter string) error { + return GinkgoError{ + Heading: "Invalid File Filter", + Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter), + DocLink: "filtering-specs", + } +} + +func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { + return GinkgoError{ + Heading: "Invalid File Filter Regular Expression", + Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. 
regexp.Compile error: %s`, filter, err), + DocLink: "filtering-specs", + } +} + +/* Label Errors */ +func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { + var message string + if location >= 0 { + for i, r := range input { + if i == location { + message += "{{red}}{{bold}}{{underline}}" + } + message += string(r) + if i == location { + message += "{{/}}" + } + } + } else { + message = input + } + message += "\n" + error + return GinkgoError{ + Heading: "Syntax Error Parsing Label Filter", + Message: message, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Label", + Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label), + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty Label", + Message: "Labels cannot be empty", + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +/* Table errors */ +func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed multiple functions", + Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Entry description", + Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +/* Parallel Synchronization errors */ + +func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { + return GinkgoError{ + 
Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", + Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { + return GinkgoError{ + Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", + Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { + return GinkgoError{ + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", + Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", + } +} + +/* Configuration errors */ + +func (g ginkgoErrors) UnkownTypePassedToRunSpecs(value interface{}) error { + return GinkgoError{ + Heading: "Unknown Type passed to RunSpecs", + Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), + } +} + +var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." + +func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.total must be >= 1", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) MissingParallelHostConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.host is missing", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) UnreachableParallelHost(host string) error { + return GinkgoError{ + Heading: "Could not reach ginkgo.parallel.host:" + host, + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) DryRunInParallelConfiguration() error { + return GinkgoError{ + Heading: "Ginkgo only performs -dryRun in serial mode.", + Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.", + } +} + +func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { + return GinkgoError{ + Heading: "Conflicting reporter verbosity settings.", + Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!", + } +} + +func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { + return GinkgoError{ + Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), + Message: "You must choose one of 'dup', 'swap', or 'none'.", + } +} + +func (g ginkgoErrors) InvalidGoFlagCount() error { + return GinkgoError{ + Heading: "Use of go test -count", + Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed.
To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", + } +} + +func (g ginkgoErrors) InvalidGoFlagParallel() error { + return GinkgoError{ + Heading: "Use of go test -parallel", + Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", + } +} + +func (g ginkgoErrors) BothRepeatAndUntilItFails() error { + return GinkgoError{ + Heading: "--repeat and --until-it-fails are both set", + Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?", + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go new file mode 100644 index 00000000..cc21df71 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go @@ -0,0 +1,106 @@ +package types + +import ( + "regexp" + "strconv" + "strings" +) + +func ParseFileFilters(filters []string) (FileFilters, error) { + ffs := FileFilters{} + for _, filter := range filters { + ff := FileFilter{} + if filter == "" { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + components := strings.Split(filter, ":") + if !(len(components) == 1 || len(components) == 2) { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + + var err error + ff.Filename, err = regexp.Compile(components[0]) + if err != nil { + return nil, err + } + if len(components) == 2 { + lineFilters := strings.Split(components[1], ",") + for _, lineFilter := range lineFilters { + components := strings.Split(lineFilter, "-") + if len(components) == 1 { + line, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) + } else if len(components) == 2 { + line1, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + line2, err := strconv.Atoi(strings.TrimSpace(components[1])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) + } else { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + } + } + ffs = append(ffs, ff) + } + return ffs, nil +} + +type FileFilter struct { + Filename *regexp.Regexp + LineFilters LineFilters +} + +func (f FileFilter) Matches(locations []CodeLocation) bool { + for _, location := range locations { + if f.Filename.MatchString(location.FileName) && + f.LineFilters.Matches(location.LineNumber) { + return true + } + + } + return false +} + +type FileFilters []FileFilter + +func (ffs FileFilters) Matches(locations []CodeLocation) bool { + for _, ff := range ffs { + if ff.Matches(locations) { + return true + } + } + + return false +} + +type LineFilter struct { + Min int + Max int +} + +func (lf LineFilter) Matches(line int) bool { + return lf.Min <= line && line < lf.Max +} + +type LineFilters []LineFilter + +func (lfs LineFilters) Matches(line int) bool { + if len(lfs) == 0 { + return true + } + + for _, lf := range lfs { + if lf.Matches(line) { + return true + } + } + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go new file mode 100644 index 00000000..7d8a73fe --- /dev/null +++ 
b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -0,0 +1,489 @@ +package types + +import ( + "flag" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoFlag struct { + Name string + KeyPath string + SectionKey string + + Usage string + UsageArgument string + UsageDefaultValue string + + DeprecatedName string + DeprecatedDocLink string + DeprecatedVersion string + + ExportAs string +} + +type GinkgoFlags []GinkgoFlag + +func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags { + out := GinkgoFlags{} + out = append(out, f...) + out = append(out, flags...) + return out +} + +func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags { + if prefix == "" { + return f + } + out := GinkgoFlags{} + for _, flag := range f { + if flag.Name != "" { + flag.Name = prefix + "." + flag.Name + } + if flag.DeprecatedName != "" { + flag.DeprecatedName = prefix + "." + flag.DeprecatedName + } + if flag.ExportAs != "" { + flag.ExportAs = prefix + "." + flag.ExportAs + } + out = append(out, flag) + } + return out +} + +func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags { + out := GinkgoFlags{} + for _, flag := range f { + for _, name := range names { + if flag.Name == name { + out = append(out, flag) + break + } + } + } + return out +} + +type GinkgoFlagSection struct { + Key string + Style string + Succinct bool + Heading string + Description string +} + +type GinkgoFlagSections []GinkgoFlagSection + +func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { + for _, section := range gfs { + if section.Key == key { + return section, true + } + } + + return GinkgoFlagSection{}, false +} + +type GinkgoFlagSet struct { + flags GinkgoFlags + bindings interface{} + + sections GinkgoFlagSections + extraGoFlagsSection GinkgoFlagSection + + flagSet *flag.FlagSet +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet +func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + }, nil) +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + extraGoFlagsSection: extraGoFlagsSection, + }, flagSet) +} + +func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { + if flagSet == nil { + f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + //supress all output as Ginkgo is reponsible for formatting usage + f.flagSet.SetOutput(io.Discard) + } else { + f.flagSet = flagSet + //we're piggybacking on an existing flagset (typically go test) so we have limited control + //on user feedback + f.flagSet.Usage = f.substituteUsage + } + + for _, flag := range f.flags { + name := flag.Name + + deprecatedUsage := "[DEPRECATED]" + deprecatedName := flag.DeprecatedName + if name != "" { + deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name) + } else if flag.Usage != "" { + deprecatedUsage += " " + flag.Usage + } + + value, ok := valueAtKeyPath(f.bindings, flag.KeyPath) + if !ok { + return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface, addr 
:= value.Interface(), value.Addr().Interface() + + switch value.Type() { + case reflect.TypeOf(string("")): + if name != "" { + f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage) + } + case reflect.TypeOf(int64(0)): + if name != "" { + f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage) + } + case reflect.TypeOf(float64(0)): + if name != "" { + f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage) + } + case reflect.TypeOf(int(0)): + if name != "" { + f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage) + } + case reflect.TypeOf(bool(true)): + if name != "" { + f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage) + } + case reflect.TypeOf(time.Duration(0)): + if name != "" { + f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage) + } + + case reflect.TypeOf([]string{}): + if name != "" { + f.flagSet.Var(stringSliceVar{value}, name, flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage) + } + default: + return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return f, nil +} + +func (f GinkgoFlagSet) IsZero() bool { + return f.flagSet == nil +} + +func (f GinkgoFlagSet) WasSet(name string) bool { + found := false + f.flagSet.Visit(func(f *flag.Flag) { + if f.Name == name { + found = true + } + }) + + return found +} + +func (f GinkgoFlagSet) Lookup(name string) *flag.Flag { + return f.flagSet.Lookup(name) +} + +func (f GinkgoFlagSet) Parse(args []string) ([]string, error) { + if f.IsZero() { + return args, nil + } + err := f.flagSet.Parse(args) + if err != nil { + return []string{}, err + } + return f.flagSet.Args(), nil +} + +func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) { + if f.IsZero() { + return + } + f.flagSet.Visit(func(flag *flag.Flag) { + for _, ginkgoFlag := range f.flags { + if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) { + message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName) + if ginkgoFlag.Name != "" { + message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name) + } else if ginkgoFlag.Usage != "" { + message += " " + ginkgoFlag.Usage + } + + deprecationTracker.TrackDeprecation(Deprecation{ + Message: message, + DocLink: ginkgoFlag.DeprecatedDocLink, + Version: ginkgoFlag.DeprecatedVersion, + }) + } + } + }) +} + +func (f GinkgoFlagSet) Usage() string { + if f.IsZero() { + return "" + } + groupedFlags := map[GinkgoFlagSection]GinkgoFlags{} + ungroupedFlags := GinkgoFlags{} + managedFlags := map[string]bool{} + extraGoFlags := []*flag.Flag{} + + for _, flag := range f.flags { + managedFlags[flag.Name] = true + managedFlags[flag.DeprecatedName] = 
true + + if flag.Name == "" { + continue + } + + section, ok := f.sections.Lookup(flag.SectionKey) + if ok { + groupedFlags[section] = append(groupedFlags[section], flag) + } else { + ungroupedFlags = append(ungroupedFlags, flag) + } + } + + f.flagSet.VisitAll(func(flag *flag.Flag) { + if !managedFlags[flag.Name] { + extraGoFlags = append(extraGoFlags, flag) + } + }) + + out := "" + for _, section := range f.sections { + flags := groupedFlags[section] + if len(flags) == 0 { + continue + } + out += f.usageForSection(section) + if section.Succinct { + succinctFlags := []string{} + for _, flag := range flags { + if flag.Name != "" { + succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name)) + } + } + out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n") + } else { + for _, flag := range flags { + out += f.usageForFlag(flag, section.Style) + } + } + out += "\n" + } + if len(ungroupedFlags) > 0 { + for _, flag := range ungroupedFlags { + out += f.usageForFlag(flag, "") + } + out += "\n" + } + if len(extraGoFlags) > 0 { + out += f.usageForSection(f.extraGoFlagsSection) + for _, goFlag := range extraGoFlags { + out += f.usageForGoFlag(goFlag) + } + } + + return out +} + +func (f GinkgoFlagSet) substituteUsage() { + fmt.Fprintln(f.flagSet.Output(), f.Usage()) +} + +func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { + if len(keyPath) == 0 { + return reflect.Value{}, false + } + + val := reflect.ValueOf(root) + components := strings.Split(keyPath, ".") + for _, component := range components { + val = reflect.Indirect(val) + switch val.Kind() { + case reflect.Map: + val = val.MapIndex(reflect.ValueOf(component)) + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + case reflect.Struct: + val = val.FieldByName(component) + default: + return reflect.Value{}, false + } + if (val == reflect.Value{}) { + return reflect.Value{}, false + } + } + + return val, true +} + +func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string { + out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n") + if section.Description != "" { + out += formatter.Fiw(0, formatter.COLS, section.Description+"\n") + } + return out +} + +func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string { + argument := flag.UsageArgument + defValue := flag.UsageDefaultValue + if argument == "" { + value, _ := valueAtKeyPath(f.bindings, flag.KeyPath) + switch value.Type() { + case reflect.TypeOf(string("")): + argument = "string" + case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)): + argument = "int" + case reflect.TypeOf(time.Duration(0)): + argument = "duration" + case reflect.TypeOf(float64(0)): + argument = "float" + case reflect.TypeOf([]string{}): + argument = "string" + } + } + if argument != "" { + argument = "[" + argument + "] " + } + if defValue != "" { + defValue = fmt.Sprintf("(default: %s)", defValue) + } + hyphens := "--" + if len(flag.Name) == 1 { + hyphens = "-" + } + + out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue) + out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage) + return out +} + +func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string { + //Taken directly from the flag package + out := fmt.Sprintf(" -%s", goFlag.Name) + name, usage := flag.UnquoteUsage(goFlag) + if len(name) > 0 { + out += " " + name + } + if len(out) <= 4 { + out += "\t" + } else { + 
out += "\n \t" + } + out += strings.ReplaceAll(usage, "\n", "\n \t") + out += "\n" + return out +} + +type stringSliceVar struct { + slice reflect.Value +} + +func (ssv stringSliceVar) String() string { return "" } +func (ssv stringSliceVar) Set(s string) error { + ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s}))) + return nil +} + +//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { + result := []string{} + for _, flag := range flags { + name := flag.ExportAs + if name == "" { + name = flag.Name + } + if name == "" { + continue + } + + value, ok := valueAtKeyPath(bindings, flag.KeyPath) + if !ok { + return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface := value.Interface() + switch value.Type() { + case reflect.TypeOf(string("")): + if iface.(string) != "" { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + case reflect.TypeOf(int64(0)): + if iface.(int64) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(float64(0)): + if iface.(float64) != 0 { + result = append(result, fmt.Sprintf("--%s=%f", name, iface)) + } + case reflect.TypeOf(int(0)): + if iface.(int) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(bool(true)): + if iface.(bool) { + result = append(result, fmt.Sprintf("--%s", name)) + } + case reflect.TypeOf(time.Duration(0)): + if iface.(time.Duration) != time.Duration(0) { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + + case reflect.TypeOf([]string{}): + strings := iface.([]string) + for _, s := range strings { + result = append(result, fmt.Sprintf("--%s=%s", name, s)) + } + default: + return []string{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return result, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go new file mode 100644 index 00000000..0403f9e6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -0,0 +1,347 @@ +package types + +import ( + "fmt" + "regexp" + "strings" +) + +var DEBUG_LABEL_FILTER_PARSING = false + +type LabelFilter func([]string) bool + +func matchLabelAction(label string) LabelFilter { + expected := strings.ToLower(label) + return func(labels []string) bool { + for i := range labels { + if strings.ToLower(labels[i]) == expected { + return true + } + } + return false + } +} + +func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { + return func(labels []string) bool { + for i := range labels { + if regex.MatchString(labels[i]) { + return true + } + } + return false + } +} + +func notAction(filter LabelFilter) LabelFilter { + return func(labels []string) bool { return !filter(labels) } +} + +func andAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) && b(labels) } +} + +func orAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) || b(labels) } +} + +type lfToken uint + +const ( + lfTokenInvalid lfToken = iota + + lfTokenRoot + lfTokenOpenGroup + lfTokenCloseGroup + lfTokenNot + lfTokenAnd + lfTokenOr + lfTokenRegexp + lfTokenLabel + lfTokenEOF +) + +func (l lfToken) Precedence() int { + switch l { + case lfTokenRoot, lfTokenOpenGroup: + return 0 + case lfTokenOr: + return 1 + case 
lfTokenAnd: + return 2 + case lfTokenNot: + return 3 + } + return -1 +} + +func (l lfToken) String() string { + switch l { + case lfTokenRoot: + return "ROOT" + case lfTokenOpenGroup: + return "(" + case lfTokenCloseGroup: + return ")" + case lfTokenNot: + return "!" + case lfTokenAnd: + return "&&" + case lfTokenOr: + return "||" + case lfTokenRegexp: + return "/regexp/" + case lfTokenLabel: + return "label" + case lfTokenEOF: + return "EOF" + } + return "INVALID" +} + +type treeNode struct { + token lfToken + location int + value string + + parent *treeNode + leftNode *treeNode + rightNode *treeNode +} + +func (tn *treeNode) setRightNode(node *treeNode) { + tn.rightNode = node + node.parent = tn +} + +func (tn *treeNode) setLeftNode(node *treeNode) { + tn.leftNode = node + node.parent = tn +} + +func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { + if tn.token.Precedence() <= precedence { + return tn + } + return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) +} + +func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { + if tn.token == lfTokenOpenGroup { + return tn + } + if tn.parent == nil { + return nil + } + return tn.parent.firstUnmatchedOpenNode() +} + +func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { + switch tn.token { + case lfTokenOpenGroup: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") + case lfTokenLabel: + return matchLabelAction(tn.value), nil + case lfTokenRegexp: + re, err := regexp.Compile(tn.value) + if err != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) + } + return matchLabelRegexAction(re), nil + } + + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") + } + rightLF, err := tn.rightNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenRoot, lfTokenCloseGroup: + return rightLF, nil + case lfTokenNot: + return notAction(rightLF), nil + } + + if tn.leftNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) + } + leftLF, err := tn.leftNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenAnd: + return andAction(leftLF, rightLF), nil + case lfTokenOr: + return orAction(leftLF, rightLF), nil + } + + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) +} + +func (tn *treeNode) tokenString() string { + out := fmt.Sprintf("<%s", tn.token) + if tn.value != "" { + out += " | " + tn.value + } + out += ">" + return out +} + +func (tn *treeNode) toString(indent int) string { + out := tn.tokenString() + "\n" + if tn.leftNode != nil { + out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1)) + } + if tn.rightNode != nil { + out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) + } + return out +} + +func tokenize(input string) func() (*treeNode, error) { + runes, i := []rune(input), 0 + + peekIs := func(r rune) bool { + if i+1 < len(runes) { + return runes[i+1] == r + } + return false + } + + consumeUntil := func(cutset string) (string, int) { + j := i + for ; j < len(runes); j++ { + if strings.IndexRune(cutset, runes[j]) >= 0 { + 
break + } + } + return string(runes[i:j]), j - i + } + + return func() (*treeNode, error) { + for i < len(runes) && runes[i] == ' ' { + i += 1 + } + + if i >= len(runes) { + return &treeNode{token: lfTokenEOF}, nil + } + + node := &treeNode{location: i} + switch runes[i] { + case '&': + if !peekIs('&') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") + } + i += 2 + node.token = lfTokenAnd + case '|': + if !peekIs('|') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") + } + i += 2 + node.token = lfTokenOr + case '!': + i += 1 + node.token = lfTokenNot + case ',': + i += 1 + node.token = lfTokenOr + case '(': + i += 1 + node.token = lfTokenOpenGroup + case ')': + i += 1 + node.token = lfTokenCloseGroup + case '/': + i += 1 + value, n := consumeUntil("/") + i += n + 1 + node.token, node.value = lfTokenRegexp, value + default: + value, n := consumeUntil("&|!,()/") + i += n + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) + } + return node, nil + } +} + +func ParseLabelFilter(input string) (LabelFilter, error) { + if DEBUG_LABEL_FILTER_PARSING { + fmt.Println("\n==============") + fmt.Println("Input: ", input) + fmt.Print("Tokens: ") + } + nextToken := tokenize(input) + + root := &treeNode{token: lfTokenRoot} + current := root +LOOP: + for { + node, err := nextToken() + if err != nil { + return nil, err + } + + if DEBUG_LABEL_FILTER_PARSING { + fmt.Print(node.tokenString() + " ") + } + + switch node.token { + case lfTokenEOF: + break LOOP + case lfTokenLabel, lfTokenRegexp: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") + } + current.setRightNode(node) + case lfTokenNot, lfTokenOpenGroup: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) + } + current.setRightNode(node) + current = node + case lfTokenAnd, lfTokenOr: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) + } + nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) + node.setLeftNode(nodeToStealFrom.rightNode) + nodeToStealFrom.setRightNode(node) + current = node + case lfTokenCloseGroup: + firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() + if firstUnmatchedOpenNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") + } + if firstUnmatchedOpenNode == current && current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") + } + firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed + current = firstUnmatchedOpenNode.parent + default: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) + } + } + if DEBUG_LABEL_FILTER_PARSING { + fmt.Printf("\n Tree:\n%s", root.toString(0)) + } + return root.constructLabelFilter(input) +} + +func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { + out := strings.TrimSpace(label) + if out == "" { + return "", GinkgoErrors.InvalidEmptyLabel(cl) + } + if strings.ContainsAny(out, "&|!,()/") { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + return out, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go new file mode 100644 index 00000000..a85281a4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -0,0 +1,185 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" +) + +//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +//and across the network connection when running in parallel +type ReportEntryValue struct { + raw interface{} //unexported to prevent gob from freaking out about unregistered structs + AsJSON string + Representation string +} + +func WrapEntryValue(value interface{}) ReportEntryValue { + return ReportEntryValue{ + raw: value, + } +} + +func (rev ReportEntryValue) GetRawValue() interface{} { + return rev.raw +} + +func (rev ReportEntryValue) String() string { + if rev.raw == nil { + return "" + } + if colorableStringer, ok := rev.raw.(ColorableStringer); ok { + return colorableStringer.ColorableString() + } + + if stringer, ok := rev.raw.(fmt.Stringer); ok { + return stringer.String() + } + if rev.Representation != "" { + return rev.Representation + } + return fmt.Sprintf("%+v", rev.raw) +} + +func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { + //All this to capture the representaiton at encoding-time, not creating time + //This way users can Report on pointers and get their final values at reporting-time + out := struct { + AsJSON string + Representation string + }{ + Representation: rev.String(), + } + + asJSON, err := json.Marshal(rev.raw) + if err != nil { + return nil, err + } + out.AsJSON = string(asJSON) + + return 
json.Marshal(out) +} + +func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error { + in := struct { + AsJSON string + Representation string + }{} + err := json.Unmarshal(data, &in) + if err != nil { + return err + } + rev.AsJSON = in.AsJSON + rev.Representation = in.Representation + return json.Unmarshal([]byte(in.AsJSON), &(rev.raw)) +} + +func (rev ReportEntryValue) GobEncode() ([]byte, error) { + return rev.MarshalJSON() +} + +func (rev *ReportEntryValue) GobDecode(data []byte) error { + return rev.UnmarshalJSON(data) +} + +// ReportEntry captures information attached to `SpecReport` via `AddReportEntry` +type ReportEntry struct { + // Visibility captures the visibility policy for this ReportEntry + Visibility ReportEntryVisibility + // Time captures the time the AddReportEntry was called + Time time.Time + // Location captures the location of the AddReportEntry call + Location CodeLocation + // Name captures the name of this report + Name string + // Value captures the (optional) object passed into AddReportEntry - this can be + // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make + // encoding/decoding the value easier. To access the raw value call entry.GetRawValue() + Value ReportEntryValue +} + +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableStirng() is used to generate their representation. +type ColorableStringer interface { + ColorableString() string +} + +// StringRepresentation() returns the string representation of the value associated with the ReportEntry -- +// if value is nil, empty string is returned +// if value is a `ColorableStringer` then `Value.ColorableString()` is returned +// if value is a `fmt.Stringer` then `Value.String()` is returned +// otherwise the value is formatted with "%+v" +func (entry ReportEntry) StringRepresentation() string { + return entry.Value.String() +} + +// GetRawValue returns the Value object that was passed to AddReportEntry +// If called in-process this will be the same object that was passed into AddReportEntry. +// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be +// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON +// field yourself. +func (entry ReportEntry) GetRawValue() interface{} { + return entry.Value.GetRawValue() +} + +type ReportEntries []ReportEntry + +func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + return true + } + } + return false +} + +func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries { + out := ReportEntries{} + + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + out = append(out, entry) + } + } + + return out +} + +// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter +type ReportEntryVisibility uint + +const ( + // Always print out this ReportEntry + ReportEntryVisibilityAlways ReportEntryVisibility = iota + // Only print out this ReportEntry if the spec fails or if the test is run with -v + ReportEntryVisibilityFailureOrVerbose + // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. 
JSON, JUnit, etc.)) + ReportEntryVisibilityNever +) + +var revEnumSupport = NewEnumSupport(map[uint]string{ + uint(ReportEntryVisibilityAlways): "always", + uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose", + uint(ReportEntryVisibilityNever): "never", +}) + +func (rev ReportEntryVisibility) String() string { + return revEnumSupport.String(uint(rev)) +} +func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error { + out, err := revEnumSupport.UnmarshJSON(b) + *rev = ReportEntryVisibility(out) + return err +} +func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) { + return revEnumSupport.MarshJSON(uint(rev)) +} + +func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool { + for _, visibility := range visibilities { + if v == visibility { + return true + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go new file mode 100644 index 00000000..42b5644c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -0,0 +1,544 @@ +package types + +import ( + "encoding/json" + "strings" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 +const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +// Report captures information about a Ginkgo test run +type Report struct { + //SuitePath captures the absolute path to the test suite + SuitePath string + + //SuiteDescription captures the description string passed to the DSL's RunSpecs() function + SuiteDescription string + + //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function + SuiteLabels []string + + //SuiteSucceeded captures the success or failure status of the test run + //If true, the test run is considered successful. + //If false, the test run is considered unsucccessful + SuiteSucceeded bool + + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programatically focused + //(i.e an `FIt` or an `FDescribe` + SuiteHasProgrammaticFocus bool + + //SpecialSuiteFailureReasons may contain special failure reasons + //For example, a test suite might be considered "failed" even if none of the individual specs + //have a failure state. For example, if the user has configured --fail-on-pending the test suite + //will have failed if there are pending tests even though all non-pending tests may have passed. In such + //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure. + //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user. + //Since multiple special failure reasons can occur, this field is a slice. + SpecialSuiteFailureReasons []string + + //PreRunStats contains a set of stats captured before the test run begins. This is primarily used + //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) + //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. 
+ PreRunStats PreRunStats + + //StartTime and EndTime capture the start and end time of the test run + StartTime time.Time + EndTime time.Time + + //RunTime captures the duration of the test run + RunTime time.Duration + + //SuiteConfig captures the Ginkgo configuration governing this test run + //SuiteConfig includes information necessary for reproducing an identical test run, + //such as the random seed and any filters applied during the test run + SuiteConfig SuiteConfig + + //SpecReports is a list of all SpecReports generated by this test run + SpecReports SpecReports +} + +//PreRunStats contains a set of stats captured before the test run begins. This is primarily used +//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +type PreRunStats struct { + TotalSpecs int + SpecsThatWillRun int +} + +//Add is ued by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +//to form a complete final report. +func (report Report) Add(other Report) Report { + report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded + + if other.StartTime.Before(report.StartTime) { + report.StartTime = other.StartTime + } + + if other.EndTime.After(report.EndTime) { + report.EndTime = other.EndTime + } + + specialSuiteFailureReasons := []string{} + reasonsLookup := map[string]bool{} + for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} { + for _, reason := range reasons { + if !reasonsLookup[reason] { + reasonsLookup[reason] = true + specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason) + } + } + } + report.SpecialSuiteFailureReasons = specialSuiteFailureReasons + report.RunTime = report.EndTime.Sub(report.StartTime) + + reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) + for i := range report.SpecReports { + reports[i] = report.SpecReports[i] + } + offset := len(report.SpecReports) + for i := range other.SpecReports { + reports[i+offset] = other.SpecReports[i] + } + + report.SpecReports = reports + return report +} + +// SpecReport captures information about a Ginkgo spec. +type SpecReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be + // one of the NodeTypesForSuiteLevelNodes node types) + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + + // State captures whether the spec has passed, failed, etc. 
+ State SpecState + + // IsSerial captures whether the spec has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether the spec appears in an Ordered container + IsInOrderedContainer bool + + // StartTime and EndTime capture the start and end time of the spec + StartTime time.Time + EndTime time.Time + + // RunTime captures the duration of the spec + RunTime time.Duration + + // ParallelProcess captures the parallel process that this spec ran on + ParallelProcess int + + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) + //It includes detailed information about the Failure + Failure Failure + + // NumAttempts captures the number of times this Spec was run. Flakey specs can be retried with + // ginkgo --flake-attempts=N + NumAttempts int + + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter + CapturedGinkgoWriterOutput string + + // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel) + // This is always empty when running in series or calling CurrentSpecReport() + // It is used internally by Ginkgo's reporter + CapturedStdOutErr string + + // ReportEntries contains any reports added via `AddReportEntry` + ReportEntries ReportEntries +} + +func (report SpecReport) MarshalJSON() ([]byte, error) { + //All this to avoid emitting an empty Failure struct in the JSON + out := struct { + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + }{ + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, + } + + if !report.Failure.IsZero() { + out.Failure = &(report.Failure) + } + if len(report.ReportEntries) > 0 { + out.ReportEntries = report.ReportEntries + } + + return json.Marshal(out) +} + +// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput +// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty. +// CombinedOutput() is used internally by Ginkgo's reporter. 
+func (report SpecReport) CombinedOutput() string { + if report.CapturedStdOutErr == "" { + return report.CapturedGinkgoWriterOutput + } + if report.CapturedGinkgoWriterOutput == "" { + return report.CapturedStdOutErr + } + return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput +} + +//Failed returns true if report.State is one of the SpecStateFailureStates +// (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) +func (report SpecReport) Failed() bool { + return report.State.Is(SpecStateFailureStates) +} + +//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report SpecReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + if report.LeafNodeText != "" { + texts = append(texts, report.LeafNodeText) + } + return strings.Join(texts, " ") +} + +//Labels returns a deduped set of all the spec's Labels. +func (report SpecReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + for _, label := range report.LeafNodeLabels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + + return out +} + +//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { + filter, err := ParseLabelFilter(query) + if err != nil { + return false, err + } + return filter(report.Labels()), nil +} + +//FileName() returns the name of the file containing the spec +func (report SpecReport) FileName() string { + return report.LeafNodeLocation.FileName +} + +//LineNumber() returns the line number of the leaf node +func (report SpecReport) LineNumber() int { + return report.LeafNodeLocation.LineNumber +} + +//FailureMessage() returns the failure message (or empty string if the test hasn't failed) +func (report SpecReport) FailureMessage() string { + return report.Failure.Message +} + +//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +func (report SpecReport) FailureLocation() CodeLocation { + return report.Failure.Location +} + +type SpecReports []SpecReport + +//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { + count := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + count++ + } + } + + out := make(SpecReports, count) + j := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + out[j] = reports[i] + j++ + } + } + return out +} + +//WithState returns the subset of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) WithState(states SpecState) SpecReports { + count := 0 + for i := range reports { + if reports[i].State.Is(states) { + count++ + } + } + + out, j := make(SpecReports, count), 0 + for i := range reports { + if reports[i].State.Is(states) { + out[j] = reports[i] + j++ + } + } + return out +} + +//CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) CountWithState(states SpecState) int { + n := 0 + for i := range reports { + if reports[i].State.Is(states) { + n += 1 + } + } + return n +} + 
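> Editorial aside on the vendored API above (not part of the diff): `SpecState` values are bit flags, so the `SpecReports` helpers such as `WithState` and `CountWithState` accept an OR-ed mask of states. A minimal, hedged sketch of how a consumer of this package might summarize a `types.Report` — the `summarize` function and the empty `Report{}` passed to it are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

// summarize prints a short pass/fail summary from a Ginkgo Report.
// SpecState is a bitmask, so several states can be combined with "|".
func summarize(report types.Report) {
	failed := report.SpecReports.CountWithState(types.SpecStateFailed | types.SpecStatePanicked)
	passed := report.SpecReports.CountWithState(types.SpecStatePassed)
	flaked := report.SpecReports.CountOfFlakedSpecs()
	fmt.Printf("passed: %d, failed: %d, flaked: %d\n", passed, failed, flaked)
}

func main() {
	// With an empty Report all counts are zero; in practice the Report
	// would come from Ginkgo's reporting hooks.
	summarize(types.Report{})
}
```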
+//CountWithState returns the number of SpecReports that passed after multiple attempts +func (reports SpecReports) CountOfFlakedSpecs() int { + n := 0 + for i := range reports { + if reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// Failure captures failure information for an individual test +type Failure struct { + // Message - the failure message passed into Fail(...). When using a matcher library + // like Gomega, this will contain the failure message generated by Gomega. + // + // Message is also populated if hte user has called Skip(...). + Message string + + // Location - the CodeLocation where the failure occurred + // This CodeLocation will include a fully-populated StackTrace + Location CodeLocation + + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) + // then ForwardedPanic will be populated with a string representation of the captured panic. + ForwardedPanic string `json:",omitempty"` + + // FailureNodeContext - one of three contexts describing the node in which the failure occured: + // FailureNodeIsLeafNode means the failure occured in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occured in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occured in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocaiton, and FailureNodeContainerIndex will be populated. + // + // FailureNodeType will contain the NodeType of the node in which the failure occurred. + // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. + // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. 
+ FailureNodeContext FailureNodeContext + FailureNodeType NodeType + FailureNodeLocation CodeLocation + FailureNodeContainerIndex int +} + +func (f Failure) IsZero() bool { + return f == Failure{} +} + +// FailureNodeContext captures the location context for the node containing the failing line of code +type FailureNodeContext uint + +const ( + FailureNodeContextInvalid FailureNodeContext = iota + + FailureNodeIsLeafNode + FailureNodeAtTopLevel + FailureNodeInContainer +) + +var fncEnumSupport = NewEnumSupport(map[uint]string{ + uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT", + uint(FailureNodeIsLeafNode): "leaf-node", + uint(FailureNodeAtTopLevel): "top-level", + uint(FailureNodeInContainer): "in-container", +}) + +func (fnc FailureNodeContext) String() string { + return fncEnumSupport.String(uint(fnc)) +} +func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error { + out, err := fncEnumSupport.UnmarshJSON(b) + *fnc = FailureNodeContext(out) + return err +} +func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { + return fncEnumSupport.MarshJSON(uint(fnc)) +} + +// SpecState captures the state of a spec +// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` +type SpecState uint + +const ( + SpecStateInvalid SpecState = 0 + + SpecStatePending SpecState = 1 << iota + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStateAborted + SpecStatePanicked + SpecStateInterrupted +) + +var ssEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecStateInvalid): "INVALID SPEC STATE", + uint(SpecStatePending): "pending", + uint(SpecStateSkipped): "skipped", + uint(SpecStatePassed): "passed", + uint(SpecStateFailed): "failed", + uint(SpecStateAborted): "aborted", + uint(SpecStatePanicked): "panicked", + uint(SpecStateInterrupted): "interrupted", +}) + +func (ss SpecState) String() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss *SpecState) UnmarshalJSON(b []byte) error { + out, err := ssEnumSupport.UnmarshJSON(b) + *ss = SpecState(out) + return err +} +func (ss SpecState) MarshalJSON() ([]byte, error) { + return ssEnumSupport.MarshJSON(uint(ss)) +} + +var SpecStateFailureStates = SpecStateFailed | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted + +func (ss SpecState) Is(states SpecState) bool { + return ss&states != 0 +} + +// NodeType captures the type of a given Ginkgo Node +type NodeType uint + +const ( + NodeTypeInvalid NodeType = 0 + + NodeTypeContainer NodeType = 1 << iota + NodeTypeIt + + NodeTypeBeforeEach + NodeTypeJustBeforeEach + NodeTypeAfterEach + NodeTypeJustAfterEach + + NodeTypeBeforeAll + NodeTypeAfterAll + + NodeTypeBeforeSuite + NodeTypeSynchronizedBeforeSuite + NodeTypeAfterSuite + NodeTypeSynchronizedAfterSuite + + NodeTypeReportBeforeEach + NodeTypeReportAfterEach + NodeTypeReportAfterSuite + + NodeTypeCleanupInvalid + NodeTypeCleanupAfterEach + NodeTypeCleanupAfterAll + NodeTypeCleanupAfterSuite +) + +var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite + +var ntEnumSupport = NewEnumSupport(map[uint]string{ + uint(NodeTypeInvalid): "INVALID NODE TYPE", + uint(NodeTypeContainer): "Container", + uint(NodeTypeIt): "It", + uint(NodeTypeBeforeEach): "BeforeEach", + uint(NodeTypeJustBeforeEach): "JustBeforeEach", + uint(NodeTypeAfterEach): "AfterEach", + 
uint(NodeTypeJustAfterEach): "JustAfterEach", + uint(NodeTypeBeforeAll): "BeforeAll", + uint(NodeTypeAfterAll): "AfterAll", + uint(NodeTypeBeforeSuite): "BeforeSuite", + uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite", + uint(NodeTypeAfterSuite): "AfterSuite", + uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", + uint(NodeTypeReportBeforeEach): "ReportBeforeEach", + uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportAfterSuite): "ReportAfterSuite", + uint(NodeTypeCleanupInvalid): "INVALID CLEANUP NODE", + uint(NodeTypeCleanupAfterEach): "DeferCleanup", + uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", + uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", +}) + +func (nt NodeType) String() string { + return ntEnumSupport.String(uint(nt)) +} +func (nt *NodeType) UnmarshalJSON(b []byte) error { + out, err := ntEnumSupport.UnmarshJSON(b) + *nt = NodeType(out) + return err +} +func (nt NodeType) MarshalJSON() ([]byte, error) { + return ntEnumSupport.MarshJSON(uint(nt)) +} + +func (nt NodeType) Is(nodeTypes NodeType) bool { + return nt&nodeTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go new file mode 100644 index 00000000..00781a44 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -0,0 +1,3 @@ +package types + +const VERSION = "2.0.0" diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod deleted file mode 100644 index 7fea4ac0..00000000 --- a/vendor/github.com/onsi/gomega/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/onsi/gomega - -go 1.16 - -require ( - github.com/golang/protobuf v1.5.2 - github.com/onsi/ginkgo v1.16.4 - golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 - gopkg.in/yaml.v2 v2.4.0 -) diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum deleted file mode 100644 index 56f1b44e..00000000 --- a/vendor/github.com/onsi/gomega/go.sum +++ /dev/null @@ -1,106 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf 
v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= -golang.org/x/net 
v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod deleted file mode 100644 index 7d29a0a6..00000000 --- a/vendor/github.com/pelletier/go-toml/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pelletier/go-toml - -go 1.12 diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod deleted file mode 100644 index ba6681f5..00000000 --- a/vendor/github.com/prometheus/procfs/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/prometheus/procfs - -go 1.13 - -require ( - github.com/google/go-cmp v0.5.4 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c -) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum deleted file mode 100644 index 7ceaf56b..00000000 --- a/vendor/github.com/prometheus/procfs/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/sirupsen/logrus/go.mod 
b/vendor/github.com/sirupsen/logrus/go.mod deleted file mode 100644 index b3919d5e..00000000 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/sirupsen/logrus - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 -) - -go 1.13 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum deleted file mode 100644 index 694c18b8..00000000 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod deleted file mode 100644 index abe4fe1c..00000000 --- a/vendor/github.com/spf13/afero/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/spf13/afero - -require ( - github.com/pkg/sftp v1.10.1 - golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 - golang.org/x/text v0.3.3 -) - -go 1.13 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum deleted file mode 100644 index 89d9bfbc..00000000 --- a/vendor/github.com/spf13/afero/go.sum +++ /dev/null @@ -1,29 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml deleted file mode 100644 index 833a4879..00000000 --- a/vendor/github.com/spf13/cast/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -env: - - GO111MODULE=on -sudo: required -go: - - "1.11.x" - - "1.12.x" - - tip -os: - - linux -matrix: - allow_failures: - - go: tip - fast_finish: true -script: - - make check diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index e6939397..120a5734 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,7 +1,7 @@ cast ==== [![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) -[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast) +[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) Easy and safe casting from one type to another in Go diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go index 9fba638d..0cfe9418 100644 --- a/vendor/github.com/spf13/cast/cast.go +++ b/vendor/github.com/spf13/cast/cast.go @@ -20,6 +20,11 @@ func ToTime(i interface{}) time.Time { return v } +func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { + v, _ := ToTimeInDefaultLocationE(i, location) + return v +} + // ToDuration casts an interface to a time.Duration type. func ToDuration(i interface{}) time.Duration { v, _ := ToDurationE(i) diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index 70c7291b..c04af6a9 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -20,13 +20,20 @@ var errNegativeNotAllowed = errors.New("unable to cast negative value") // ToTimeE casts an interface to a time.Time type. func ToTimeE(i interface{}) (tim time.Time, err error) { + return ToTimeInDefaultLocationE(i, time.UTC) +} + +// ToTimeInDefaultLocationE casts an empty interface to time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. 
+func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { i = indirect(i) switch v := i.(type) { case time.Time: return v, nil case string: - return StringToDate(v) + return StringToDateInDefaultLocation(v, location) case int: return time.Unix(int64(v), 0), nil case int64: @@ -1129,8 +1136,43 @@ func ToStringSliceE(i interface{}) ([]string, error) { return a, nil case []string: return v, nil + case []int8: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil case string: return strings.Fields(v), nil + case []error: + for _, err := range i.([]error) { + a = append(a, err.Error()) + } + return a, nil case interface{}: str, err := ToStringE(v) if err != nil { @@ -1204,37 +1246,83 @@ func ToDurationSliceE(i interface{}) ([]time.Duration, error) { // predefined list of formats. If no suitable format is found, an error is // returned. func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, []string{ - time.RFC3339, - "2006-01-02T15:04:05", // iso8601 without timezone - time.RFC1123Z, - time.RFC1123, - time.RFC822Z, - time.RFC822, - time.RFC850, - time.ANSIC, - time.UnixDate, - time.RubyDate, - "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String() - "2006-01-02", - "02 Jan 2006", - "2006-01-02T15:04:05-0700", // RFC3339 without timezone hh:mm colon - "2006-01-02 15:04:05 -07:00", - "2006-01-02 15:04:05 -0700", - "2006-01-02 15:04:05Z07:00", // RFC3339 without T - "2006-01-02 15:04:05Z0700", // RFC3339 without T or timezone hh:mm colon - "2006-01-02 15:04:05", - time.Kitchen, - time.Stamp, - time.StampMilli, - time.StampMicro, - time.StampNano, - }) + return parseDateWith(s, time.UTC, timeFormats) +} + +// StringToDateInDefaultLocation casts an empty interface to a time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. 
+func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return parseDateWith(s, location, timeFormats) } -func parseDateWith(s string, dates []string) (d time.Time, e error) { - for _, dateType := range dates { - if d, e = time.Parse(dateType, s); e == nil { +type timeFormatType int + +const ( + timeFormatNoTimezone timeFormatType = iota + timeFormatNamedTimezone + timeFormatNumericTimezone + timeFormatNumericAndNamedTimezone + timeFormatTimeOnly +) + +type timeFormat struct { + format string + typ timeFormatType +} + +func (f timeFormat) hasTimezone() bool { + // We don't include the formats with only named timezones, see + // https://github.com/golang/go/issues/19694#issuecomment-289103522 + return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone +} + +var ( + timeFormats = []timeFormat{ + timeFormat{time.RFC3339, timeFormatNumericTimezone}, + timeFormat{"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + timeFormat{time.RFC1123Z, timeFormatNumericTimezone}, + timeFormat{time.RFC1123, timeFormatNamedTimezone}, + timeFormat{time.RFC822Z, timeFormatNumericTimezone}, + timeFormat{time.RFC822, timeFormatNamedTimezone}, + timeFormat{time.RFC850, timeFormatNamedTimezone}, + timeFormat{"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + timeFormat{"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + timeFormat{"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + timeFormat{"2006-01-02 15:04:05", timeFormatNoTimezone}, + timeFormat{time.ANSIC, timeFormatNoTimezone}, + timeFormat{time.UnixDate, timeFormatNamedTimezone}, + timeFormat{time.RubyDate, timeFormatNumericTimezone}, + timeFormat{"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + timeFormat{"2006-01-02", timeFormatNoTimezone}, + timeFormat{"02 Jan 2006", timeFormatNoTimezone}, + timeFormat{"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + timeFormat{"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + timeFormat{time.Kitchen, timeFormatTimeOnly}, + timeFormat{time.Stamp, timeFormatTimeOnly}, + timeFormat{time.StampMilli, timeFormatTimeOnly}, + timeFormat{time.StampMicro, timeFormatTimeOnly}, + timeFormat{time.StampNano, timeFormatTimeOnly}, + } +) + +func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { + + for _, format := range formats { + if d, e = time.Parse(format.format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. 
+ if format.typ <= timeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + return } } diff --git a/vendor/github.com/spf13/cast/go.mod b/vendor/github.com/spf13/cast/go.mod deleted file mode 100644 index c1c0232d..00000000 --- a/vendor/github.com/spf13/cast/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/spf13/cast - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 -) diff --git a/vendor/github.com/spf13/cast/go.sum b/vendor/github.com/spf13/cast/go.sum deleted file mode 100644 index e03ee77d..00000000 --- a/vendor/github.com/spf13/cast/go.sum +++ /dev/null @@ -1,6 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go new file mode 100644 index 00000000..1524fc82 --- /dev/null +++ b/vendor/github.com/spf13/cast/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. + +package cast + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[timeFormatNoTimezone-0] + _ = x[timeFormatNamedTimezone-1] + _ = x[timeFormatNumericTimezone-2] + _ = x[timeFormatNumericAndNamedTimezone-3] + _ = x[timeFormatTimeOnly-4] +} + +const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" + +var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i timeFormatType) String() string { + if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { + return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] +} diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index e0a3b500..00000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -stages: - - test - - build - -go: - - 1.12.x - - 1.13.x - - tip - -env: GO111MODULE=on - -before_install: - - go get -u github.com/kyoh86/richgo - - go get -u github.com/mitchellh/gox - - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest - -matrix: - allow_failures: - - go: tip - include: - - stage: build - go: 1.13.x - script: make cobra_generator - -script: - - make test diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index 472c73bf..5880f04e 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -23,7 +23,7 @@ lint: $(info ******************** running lint tools ********************) golangci-lint run -v -test: install_deps lint +test: install_deps $(info ******************** running tests ********************) richgo test -v ./... diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index a1b13ddd..1ade1081 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -7,33 +7,10 @@ Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. 
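The spf13/cast hunks above add location-aware time parsing (`ToTimeInDefaultLocation` / `StringToDateInDefaultLocation`) and extend `ToStringSliceE` to numeric and error slices. Below is a minimal, illustrative sketch of how the vendored API behaves; only the function names come from this diff, while the location, timestamps, and slice values are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// Placeholder location; any IANA zone name works here.
	loc, err := time.LoadLocation("Asia/Shanghai")
	if err != nil {
		panic(err)
	}

	// A string without an explicit zone is interpreted in the supplied location.
	fmt.Println(cast.ToTimeInDefaultLocation("2022-01-11 17:08:00", loc))
	// -> 2022-01-11 17:08:00 +0800 CST

	// A string that already carries an offset keeps that offset.
	fmt.Println(cast.ToTimeInDefaultLocation("2022-01-11T17:08:00Z", loc).UTC())
	// -> 2022-01-11 17:08:00 +0000 UTC

	// ToStringSliceE (via the ToStringSlice wrapper) now accepts numeric slices.
	fmt.Println(cast.ToStringSlice([]int{1, 2, 3})) // [1 2 3]
}
```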
[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) [![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating shell completions](#generating-shell-completions) -- [Contributing](CONTRIBUTING.md) -- [License](#license) - # Overview Cobra is a library providing a simple interface to create powerful modern CLI @@ -47,7 +24,7 @@ Cobra provides: * Fully POSIX-compliant flags (including short & long versions) * Nested subcommands * Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` +* Easy generation of applications & commands with `cobra init` & `cobra add cmdname` * Intelligent suggestions (`app srver`... did you mean `app server`?) * Automatic help generation for commands and flags * Automatic help flag recognition of `-h`, `--help`, etc. @@ -55,7 +32,7 @@ Cobra provides: * Automatically generated man pages for your application * Command aliases so you can change things without breaking them * The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps +* Optional seamless integration with [viper](http://github.com/spf13/viper) for 12-factor apps # Concepts @@ -89,7 +66,7 @@ have children commands and optionally run an action. In the example above, 'server' is the command. -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) +[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command) ## Flags @@ -117,643 +94,13 @@ Next, include Cobra in your application: import "github.com/spf13/cobra" ``` -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - +# Usage Cobra provides its own program that will create your application and add any commands you want. 
It's the easiest way to incorporate Cobra into your application. -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. -func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. 
- -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used. - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. 
If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires a color argument") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. 
-Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. 
- -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. 
For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions +For complete details on using the Cobra generator, please read [The Cobra Generator README](https://github.com/spf13/cobra/blob/master/cobra/README.md) -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). +For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md). # License diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 70e9b262..20a022b3 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -107,3 +107,15 @@ func RangeArgs(min int, max int) PositionalArgs { return nil } } + +// MatchAll allows combining several PositionalArgs to work in concert. 
+func MatchAll(pargs ...PositionalArgs) PositionalArgs { + return func(cmd *Command, args []string) error { + for _, parg := range pargs { + if err := parg(cmd, args); err != nil { + return err + } + } + return nil + } +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 71061479..6c360c59 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -24,7 +24,7 @@ func writePreamble(buf io.StringWriter, name string) { WriteStringAndCheck(buf, fmt.Sprintf(` __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -134,7 +134,7 @@ __%[1]s_handle_go_custom_completion() $filteringCmd elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then # File completion for directories only - local subDir + local subdir # Use printf to strip any trailing newline subdir=$(printf "%%s" "${out[0]}") if [ -n "$subdir" ]; then @@ -187,13 +187,19 @@ __%[1]s_handle_reply() PREFIX="" cur="${cur#*=}" ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then + if [ -n "${ZSH_VERSION:-}" ]; then # zsh completion needs --flag= prefix eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" fi fi fi - return 0; + + if [[ -z "${flag_parsing_disabled}" ]]; then + # If flag parsing is enabled, we have completed the flags and can return. + # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough + # to possibly call handle_go_custom_completion. + return 0; + fi ;; esac @@ -232,13 +238,13 @@ __%[1]s_handle_reply() fi if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi + if declare -F __%[1]s_custom_func >/dev/null; then + # try command name qualified custom func + __%[1]s_custom_func + else + # otherwise fall back to unqualified for compatibility + declare -F __custom_func >/dev/null && __custom_func + fi fi # available in bash-completion >= 2, not always present on macOS @@ -272,7 +278,7 @@ __%[1]s_handle_flag() # if a command required a flag, and we found it, unset must_have_one_flag() local flagname=${words[c]} - local flagvalue + local flagvalue="" # if the word contained an = if [[ ${words[c]} == *"="* ]]; then flagvalue=${flagname#*=} # take in as flagvalue after the = @@ -291,7 +297,7 @@ __%[1]s_handle_flag() # keep flag value with flagname as flaghash # flaghash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then if [ -n "${flagvalue}" ] ; then flaghash[${flagname}]=${flagvalue} elif [ -n "${words[ $((c+1)) ]}" ] ; then @@ -303,7 +309,7 @@ __%[1]s_handle_flag() # skip the argument to a two word flag if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" + __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" c=$((c+1)) # if we are looking for a flags value, don't show commands if [[ $c -eq $cword ]]; then @@ -363,7 +369,7 @@ __%[1]s_handle_word() __%[1]s_handle_command elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then # aliashash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then words[c]=${aliashash[${words[c]}]} __%[1]s_handle_command else @@ -384,7 +390,7 @@ func writePostscript(buf io.StringWriter, name string) { name = strings.Replace(name, ":", "__", -1) WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) WriteStringAndCheck(buf, fmt.Sprintf(`{ - local cur prev words cword + local cur prev words cword split declare -A flaghash 2>/dev/null || : declare -A aliashash 2>/dev/null || : if declare -F _init_completion >/dev/null 2>&1; then @@ -394,17 +400,20 @@ func writePostscript(buf io.StringWriter, name string) { fi local c=0 + local flag_parsing_disabled= local flags=() local two_word_flags=() local local_nonpersistent_flags=() local flags_with_completion=() local flags_completion=() local commands=("%[1]s") + local command_aliases=() local must_have_one_flag=() local must_have_one_noun=() - local has_completion_function - local last_command + local has_completion_function="" + local last_command="" local nouns=() + local noun_aliases=() __%[1]s_handle_word } @@ -510,6 +519,8 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { // Setup annotations for go completions for registered flags func prepareCustomAnnotationsForFlags(cmd *Command) { + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() for flag := range flagCompletionFunctions { // Make sure the completion script calls the __*_go_custom_completion function for // every registered flag. 
We need to do this here (and not when the flag was registered @@ -531,6 +542,11 @@ func writeFlags(buf io.StringWriter, cmd *Command) { flags_completion=() `) + + if cmd.DisableFlagParsing { + WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") + } + localNonPersistentFlags := cmd.LocalNonPersistentFlags() cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -605,7 +621,7 @@ func writeCmdAliases(buf io.StringWriter, cmd *Command) { sort.Strings(cmd.Aliases) - WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) for _, value := range cmd.Aliases { WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index 130f99b9..52919b2f 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -6,6 +6,8 @@ Please refer to [Shell Completions](shell_completions.md) for details. For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. +**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. + The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. Some code that works in kubernetes: diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go new file mode 100644 index 00000000..82d26c17 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -0,0 +1,331 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genBashComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genBashComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + + WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Macs have bash3 for which the bash-completion package doesn't include +# _init_completion. This is a minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +# This function calls the %[1]s program to obtain the completion +# results and the directive. It fills the 'out' and 'directive' vars. 
+__%[1]s_get_completion_results() { + local requestComp lastParam lastChar args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + args=("${words[@]:1}") + requestComp="${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} ''" + fi + + # When completing a flag with an = (e.g., %[1]s -n=) + # bash focuses on the part after the =, so we need to remove + # the flag part from $cur + if [[ "${cur}" == -*=* ]]; then + cur="${cur#*=}" + fi + + __%[1]s_debug "Calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "The completion directive is: ${directive}" + __%[1]s_debug "The completions are: ${out[*]}" +} + +__%[1]s_process_completion_results() { + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no space" + compopt -o nospace + else + __%[1]s_debug "No space directive not supported in this version of bash" + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no file completion" + compopt +o default + else + __%[1]s_debug "No file completion directive not supported in this version of bash" + fi + fi + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + + # Do not use quotes around the $out variable or else newline + # characters will be kept. + for filter in ${out[*]}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + + # Use printf to strip any trailing newline + local subdir + subdir=$(printf "%%s" "${out[0]}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return + else + __%[1]s_debug "Listing directories in ." 
+ _filedir -d + fi + else + __%[1]s_handle_completion_types + fi + + __%[1]s_handle_special_char "$cur" : + __%[1]s_handle_special_char "$cur" = +} + +__%[1]s_handle_completion_types() { + __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" + + case $COMP_TYPE in + 37|42) + # Type: menu-complete/menu-complete-backward and insert-completions + # If the user requested inserting one completion at a time, or all + # completions at once on the command-line we must remove the descriptions. + # https://github.com/spf13/cobra/issues/1508 + local tab comp + tab=$(printf '\t') + while IFS='' read -r comp; do + # Strip any description + comp=${comp%%%%$tab*} + # Only consider the completions that match + comp=$(compgen -W "$comp" -- "$cur") + if [ -n "$comp" ]; then + COMPREPLY+=("$comp") + fi + done < <(printf "%%s\n" "${out[@]}") + ;; + + *) + # Type: complete (normal completion) + __%[1]s_handle_standard_completion_case + ;; + esac +} + +__%[1]s_handle_standard_completion_case() { + local tab comp + tab=$(printf '\t') + + local longest=0 + # Look for the longest completion so that we can format things nicely + while IFS='' read -r comp; do + # Strip any description before checking the length + comp=${comp%%%%$tab*} + # Only consider the completions that match + comp=$(compgen -W "$comp" -- "$cur") + if ((${#comp}>longest)); then + longest=${#comp} + fi + done < <(printf "%%s\n" "${out[@]}") + + local completions=() + while IFS='' read -r comp; do + if [ -z "$comp" ]; then + continue + fi + + __%[1]s_debug "Original comp: $comp" + comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")" + __%[1]s_debug "Final comp: $comp" + completions+=("$comp") + done < <(printf "%%s\n" "${out[@]}") + + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${completions[*]}" -- "$cur") + + # If there is a single completion left, remove the description text + if [ ${#COMPREPLY[*]} -eq 1 ]; then + __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" + comp="${COMPREPLY[0]%%%% *}" + __%[1]s_debug "Removed description from single completion, which is now: ${comp}" + COMPREPLY=() + COMPREPLY+=("$comp") + fi +} + +__%[1]s_handle_special_char() +{ + local comp="$1" + local char=$2 + if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then + local word=${comp%%"${comp##*${char}}"} + local idx=${#COMPREPLY[*]} + while [[ $((--idx)) -ge 0 ]]; do + COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + done + fi +} + +__%[1]s_format_comp_descriptions() +{ + local tab + tab=$(printf '\t') + local comp="$1" + local longest=$2 + + # Properly format the description string which follows a tab character if there is one + if [[ "$comp" == *$tab* ]]; then + desc=${comp#*$tab} + comp=${comp%%%%$tab*} + + # $COLUMNS stores the current shell width. + # Remove an extra 4 because we add 2 spaces and 2 parentheses. + maxdesclength=$(( COLUMNS - longest - 4 )) + + # Make sure we can fit a description of at least 8 characters + # if we are to align the descriptions. 
+ if [[ $maxdesclength -gt 8 ]]; then + # Add the proper number of spaces to align the descriptions + for ((i = ${#comp} ; i < longest ; i++)); do + comp+=" " + done + else + # Don't pad the descriptions so we can fit more text after the completion + maxdesclength=$(( COLUMNS - ${#comp} - 4 )) + fi + + # If there is enough space for any description text, + # truncate the descriptions that are too long for the shell width + if [ $maxdesclength -gt 0 ]; then + if [ ${#desc} -gt $maxdesclength ]; then + desc=${desc:0:$(( maxdesclength - 1 ))} + desc+="…" + fi + comp+=" ($desc)" + fi + fi + + # Must use printf to escape all special characters + printf "%%q" "${comp}" +} + +__start_%[1]s() +{ + local cur prev words cword split + + COMPREPLY=() + + # Call _init_completion from the bash-completion package + # to prepare the arguments properly + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -n "=:" || return + else + __%[1]s_init_completion -n "=:" || return + fi + + __%[1]s_debug + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $cword location, so we need + # to truncate the command-line ($words) up to the $cword location. + words=("${words[@]:0:$cword+1}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + local out directive + __%[1]s_get_completion_results + __%[1]s_process_completion_results +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%[1]s %[1]s +else + complete -o default -o nospace -F __start_%[1]s %[1]s +fi + +# ex: ts=4 sw=4 et filetype=sh +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) +} + +// GenBashCompletionFileV2 generates Bash completion version 2. +func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletionV2(outFile, includeDesc) +} + +// GenBashCompletionV2 generates Bash completion file version 2 +// and writes it to the passed writer. +func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { + return c.genBashCompletion(w, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index d6732ad1..2cc18891 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -63,9 +63,9 @@ type Command struct { // Example is examples of how to use the command. Example string - // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions + // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions ValidArgs []string - // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion. + // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. // It is a dynamic version of using ValidArgs. // Only one of ValidArgs and ValidArgsFunction can be used for a command. 
ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) @@ -74,11 +74,12 @@ type Command struct { Args PositionalArgs // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the bash completion, + // These are not suggested to the user in the shell completion, // but accepted if entered manually. ArgAliases []string - // BashCompletionFunction is custom functions used by the bash autocompletion generator. + // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + // For portability with other shells, it is recommended to instead use ValidArgsFunction BashCompletionFunction string // Deprecated defines, if this command is deprecated and should print this string when used. @@ -168,6 +169,9 @@ type Command struct { //FParseErrWhitelist flag parse errors to be ignored FParseErrWhitelist FParseErrWhitelist + // CompletionOptions is a set of options to control the handling of shell completion + CompletionOptions CompletionOptions + // commandsAreSorted defines, if command slice are sorted or not. commandsAreSorted bool // commandCalledAs is the name or alias value used to call this command. @@ -884,7 +888,8 @@ func (c *Command) preRun() { } // ExecuteContext is the same as Execute(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. func (c *Command) ExecuteContext(ctx context.Context) error { c.ctx = ctx return c.Execute() @@ -898,6 +903,14 @@ func (c *Command) Execute() error { return err } +// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { + c.ctx = ctx + return c.ExecuteC() +} + // ExecuteC executes the command. 
func (c *Command) ExecuteC() (cmd *Command, err error) { if c.ctx == nil { @@ -914,9 +927,10 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { preExecHookFn(c) } - // initialize help as the last point possible to allow for user - // overriding + // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() + // initialize completion at the last point to allow for user overriding + c.initDefaultCompletionCmd() args := c.args @@ -925,7 +939,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for bash completion + // initialize the hidden command to be used for shell completion c.initCompleteCmd(args) var flags []string diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go index 6159c1cc..bb5dad90 100644 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package cobra diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 8768b173..a84f5a82 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package cobra diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/completions.go similarity index 59% rename from vendor/github.com/spf13/cobra/custom_completions.go rename to vendor/github.com/spf13/cobra/completions.go index fa060c14..9ecd56a4 100644 --- a/vendor/github.com/spf13/cobra/custom_completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "strings" + "sync" "github.com/spf13/pflag" ) @@ -17,13 +18,25 @@ const ( ShellCompNoDescRequestCmd = "__completeNoDesc" ) -// Global map of flag completion functions. +// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} +// lock for reading and writing from flagCompletionFunctions +var flagCompletionMutex = &sync.RWMutex{} + // ShellCompDirective is a bit map representing the different behaviors the shell // can be instructed to have once completions have been provided. type ShellCompDirective int +type flagCompError struct { + subCommand string + flagName string +} + +func (e *flagCompError) Error() string { + return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" +} + const ( // ShellCompDirectiveError indicates an error occurred and completions should be ignored. ShellCompDirectiveError ShellCompDirective = 1 << iota @@ -34,7 +47,6 @@ const ( // ShellCompDirectiveNoFileComp indicates that the shell should not provide // file completion even when no completion is provided. 
- // This currently does not work for zsh or bash < 4 ShellCompDirectiveNoFileComp // ShellCompDirectiveFilterFileExt indicates that the provided completions @@ -63,12 +75,43 @@ const ( ShellCompDirectiveDefault ShellCompDirective = 0 ) +const ( + // Constants for the completion command + compCmdName = "completion" + compCmdNoDescFlagName = "no-descriptions" + compCmdNoDescFlagDesc = "disable completion descriptions" + compCmdNoDescFlagDefault = false +) + +// CompletionOptions are the options to control shell completion +type CompletionOptions struct { + // DisableDefaultCmd prevents Cobra from creating a default 'completion' command + DisableDefaultCmd bool + // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + // for shells that support completion descriptions + DisableNoDescFlag bool + // DisableDescriptions turns off all completion descriptions for shells + // that support them + DisableDescriptions bool + // HiddenDefaultCmd makes the default 'completion' command hidden + HiddenDefaultCmd bool +} + +// NoFileCompletions can be used to disable file completion for commands that should +// not trigger file completions. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return nil, ShellCompDirectiveNoFileComp +} + // RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { flag := c.Flag(flagName) if flag == nil { return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) } + flagCompletionMutex.Lock() + defer flagCompletionMutex.Unlock() + if _, exists := flagCompletionFunctions[flag]; exists { return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) } @@ -149,10 +192,6 @@ func (c *Command) initCompleteCmd(args []string) { fmt.Fprintln(finalCmd.OutOrStdout(), comp) } - if directive >= shellCompDirectiveMaxValue { - directive = ShellCompDirectiveDefault - } - // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects : @@ -189,29 +228,63 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if c.Root().TraverseChildren { finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) } else { - finalCmd, finalArgs, err = c.Root().Find(trimmedArgs) + // For Root commands that don't specify any value for their Args fields, when we call + // Find(), if those Root commands don't have any sub-commands, they will accept arguments. + // However, because we have added the __complete sub-command in the current code path, the + // call to Find() -> legacyArgs() will return an error if there are any arguments. + // To avoid this, we first remove the __complete command to get back to having no sub-commands. + rootCmd := c.Root() + if len(rootCmd.Commands()) == 1 { + rootCmd.RemoveCommand(c) + } + + finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) } if err != nil { // Unable to find the real command. E.g., someInvalidCmd return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) } + finalCmd.ctx = c.ctx // Check if we are doing flag value completion before parsing the flags. 
// This is important because if we are completing a flag value, we need to also // remove the flag name argument from the list of finalArgs or else the parsing // could fail due to an invalid value (incomplete) for the flag. - flag, finalArgs, toComplete, err := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) - if err != nil { - // Error while attempting to parse flags - return finalCmd, []string{}, ShellCompDirectiveDefault, err - } + flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) + + // Check if interspersed is false or -- was set on a previous arg. + // This works by counting the arguments. Normally -- is not counted as arg but + // if -- was already set or interspersed is false and there is already one arg then + // the extra added -- is counted as arg. + flagCompletion := true + _ = finalCmd.ParseFlags(append(finalArgs, "--")) + newArgCount := finalCmd.Flags().NArg() // Parse the flags early so we can check if required flags are set if err = finalCmd.ParseFlags(finalArgs); err != nil { return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) } - if flag != nil { + realArgCount := finalCmd.Flags().NArg() + if newArgCount > realArgCount { + // don't do flag completion (see above) + flagCompletion = false + } + // Error while attempting to parse flags + if flagErr != nil { + // If error type is flagCompError and we don't want flagCompletion we should ignore the error + if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + } + } + + // We only remove the flags from the arguments if DisableFlagParsing is not set. + // This is important for commands which have requested to do their own flag completion. + if !finalCmd.DisableFlagParsing { + finalArgs = finalCmd.Flags().Args() + } + + if flag != nil && flagCompletion { // Check if we are completing a flag value subject to annotations if validExts, present := flag.Annotations[BashCompFilenameExt]; present { if len(validExts) != 0 { @@ -235,12 +308,16 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } } + var completions []string + var directive ShellCompDirective + + // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; + // doing this allows for completion of persistant flag names even for commands that disable flag parsing. + // // When doing completion of a flag name, as soon as an argument starts with // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires // the flag name to be complete - if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") { - var completions []string - + if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { // First check for required flags completions = completeRequireFlags(finalCmd, toComplete) @@ -267,92 +344,94 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi }) } - directive := ShellCompDirectiveNoFileComp + directive = ShellCompDirectiveNoFileComp if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { // If there is a single completion, the shell usually adds a space // after the completion. 
We don't want that if the flag ends with an = directive = ShellCompDirectiveNoSpace } - return finalCmd, completions, directive, nil - } - // We only remove the flags from the arguments if DisableFlagParsing is not set. - // This is important for commands which have requested to do their own flag completion. - if !finalCmd.DisableFlagParsing { - finalArgs = finalCmd.Flags().Args() - } - - var completions []string - directive := ShellCompDirectiveDefault - if flag == nil { - foundLocalNonPersistentFlag := false - // If TraverseChildren is true on the root command we don't check for - // local flags because we can use a local flag on a parent command - if !finalCmd.Root().TraverseChildren { - // Check if there are any local, non-persistent flags on the command-line - localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { - foundLocalNonPersistentFlag = true - } - }) + if !finalCmd.DisableFlagParsing { + // If DisableFlagParsing==false, we have completed the flags as known by Cobra; + // we can return what we found. + // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we + // let the logic continue to see if ValidArgsFunction needs to be called. + return finalCmd, completions, directive, nil } + } else { + directive = ShellCompDirectiveDefault + if flag == nil { + foundLocalNonPersistentFlag := false + // If TraverseChildren is true on the root command we don't check for + // local flags because we can use a local flag on a parent command + if !finalCmd.Root().TraverseChildren { + // Check if there are any local, non-persistent flags on the command-line + localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { + foundLocalNonPersistentFlag = true + } + }) + } - // Complete subcommand names, including the help command - if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { - // We only complete sub-commands if: - // - there are no arguments on the command-line and - // - there are no local, non-peristent flag on the command-line or TraverseChildren is true - for _, subCmd := range finalCmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + // Complete subcommand names, including the help command + if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { + // We only complete sub-commands if: + // - there are no arguments on the command-line and + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true + for _, subCmd := range finalCmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + directive = ShellCompDirectiveNoFileComp } - directive = ShellCompDirectiveNoFileComp } } - } - // Complete required flags even without the '-' prefix - completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) - - // Always complete ValidArgs, even if we are completing a subcommand name. - // This is for commands that have both subcommands and ValidArgs. 
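Another hedged aside (command names invented, not taken from this repository) illustrating the `ValidArgs` / `ValidArgsFunction` distinction that the restructured logic above handles: static `ValidArgs` only complete the first positional argument, while `ValidArgsFunction` is consulted for dynamic completions when no `ValidArgs` are set.

```go
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "mycli"}

	// Static completion: ValidArgs apply only to the first positional argument.
	statusCmd := &cobra.Command{
		Use:       "status [env]",
		ValidArgs: []string{"dev", "staging", "prod"},
		Args:      cobra.MaximumNArgs(1),
		Run:       func(cmd *cobra.Command, args []string) {},
	}

	// Dynamic completion: ValidArgsFunction is used when ValidArgs is empty;
	// only one of the two should be set on a given command.
	deleteCmd := &cobra.Command{
		Use: "delete [name]",
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			candidates := []string{"alpha", "beta", "gamma"} // would normally be looked up at runtime
			var out []string
			for _, c := range candidates {
				if strings.HasPrefix(c, toComplete) {
					out = append(out, c)
				}
			}
			return out, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}

	rootCmd.AddCommand(statusCmd, deleteCmd)
	_ = rootCmd.Execute()
}
```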
- if len(finalCmd.ValidArgs) > 0 { - if len(finalArgs) == 0 { - // ValidArgs are only for the first argument - for _, validArg := range finalCmd.ValidArgs { - if strings.HasPrefix(validArg, toComplete) { - completions = append(completions, validArg) + // Complete required flags even without the '-' prefix + completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) + + // Always complete ValidArgs, even if we are completing a subcommand name. + // This is for commands that have both subcommands and ValidArgs. + if len(finalCmd.ValidArgs) > 0 { + if len(finalArgs) == 0 { + // ValidArgs are only for the first argument + for _, validArg := range finalCmd.ValidArgs { + if strings.HasPrefix(validArg, toComplete) { + completions = append(completions, validArg) + } } - } - directive = ShellCompDirectiveNoFileComp - - // If no completions were found within commands or ValidArgs, - // see if there are any ArgAliases that should be completed. - if len(completions) == 0 { - for _, argAlias := range finalCmd.ArgAliases { - if strings.HasPrefix(argAlias, toComplete) { - completions = append(completions, argAlias) + directive = ShellCompDirectiveNoFileComp + + // If no completions were found within commands or ValidArgs, + // see if there are any ArgAliases that should be completed. + if len(completions) == 0 { + for _, argAlias := range finalCmd.ArgAliases { + if strings.HasPrefix(argAlias, toComplete) { + completions = append(completions, argAlias) + } } } } + + // If there are ValidArgs specified (even if they don't match), we stop completion. + // Only one of ValidArgs or ValidArgsFunction can be used for a single command. + return finalCmd, completions, directive, nil } - // If there are ValidArgs specified (even if they don't match), we stop completion. - // Only one of ValidArgs or ValidArgsFunction can be used for a single command. - return finalCmd, completions, directive, nil + // Let the logic continue so as to add any ValidArgsFunction completions, + // even if we already found sub-commands. + // This is for commands that have subcommands but also specify a ValidArgsFunction. } - - // Let the logic continue so as to add any ValidArgsFunction completions, - // even if we already found sub-commands. - // This is for commands that have subcommands but also specify a ValidArgsFunction. } // Find the completion function for the flag or command var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - if flag != nil { + if flag != nil && flagCompletion { + flagCompletionMutex.RLock() completionFn = flagCompletionFunctions[flag] + flagCompletionMutex.RUnlock() } else { completionFn = finalCmd.ValidArgsFunction } @@ -435,6 +514,7 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p var flagName string trimmedArgs := args flagWithEqual := false + orgLastArg := lastArg // When doing completion of a flag name, as soon as an argument starts with // a '-' we know it is a flag. We cannot use isFlagArg() here as that function @@ -442,7 +522,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p if len(lastArg) > 0 && lastArg[0] == '-' { if index := strings.Index(lastArg, "="); index >= 0 { // Flag with an = - flagName = strings.TrimLeft(lastArg[:index], "-") + if strings.HasPrefix(lastArg[:index], "--") { + // Flag has full name + flagName = lastArg[2:index] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. 
`-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = lastArg[index-1 : index] + } lastArg = lastArg[index+1:] flagWithEqual = true } else { @@ -459,8 +548,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p // If the flag contains an = it means it has already been fully processed, // so we don't need to deal with it here. if index := strings.Index(prevArg, "="); index < 0 { - flagName = strings.TrimLeft(prevArg, "-") - + if strings.HasPrefix(prevArg, "--") { + // Flag has full name + flagName = prevArg[2:] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = prevArg[len(prevArg)-1:] + } // Remove the uncompleted flag or else there could be an error created // for an invalid value for that flag trimmedArgs = args[:len(args)-1] @@ -476,9 +573,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p flag := findFlag(finalCmd, flagName) if flag == nil { - // Flag not supported by this command, nothing to complete - err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName) - return nil, nil, "", err + // Flag not supported by this command, the interspersed option might be set so return the original args + return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} } if !flagWithEqual { @@ -494,6 +590,164 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p return flag, trimmedArgs, lastArg, nil } +// initDefaultCompletionCmd adds a default 'completion' command to c. +// This function will do nothing if any of the following is true: +// 1- the feature has been explicitly disabled by the program, +// 2- c has no subcommands (to avoid creating one), +// 3- c already has a 'completion' command provided by the program. +func (c *Command) initDefaultCompletionCmd() { + if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { + return + } + + for _, cmd := range c.commands { + if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { + // A completion command is already available + return + } + } + + haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + + completionCmd := &Command{ + Use: compCmdName, + Short: "Generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. +See each sub-command's help for details on how to use the generated script. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + Hidden: c.CompletionOptions.HiddenDefaultCmd, + } + c.AddCommand(completionCmd) + + out := c.OutOrStdout() + noDesc := c.CompletionOptions.DisableDescriptions + shortDesc := "Generate the autocompletion script for %s" + bash := &Command{ + Use: "bash", + Short: fmt.Sprintf(shortDesc, "bash"), + Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. 
+ +To load completions in your current shell session: + + source <(%[1]s completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion bash > /etc/bash_completion.d/%[1]s + +#### macOS: + + %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + DisableFlagsInUseLine: true, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenBashCompletionV2(out, !noDesc) + }, + } + if haveNoDescFlag { + bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + zsh := &Command{ + Use: "zsh", + Short: fmt.Sprintf(shortDesc, "zsh"), + Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion zsh > "${fpath[1]}/_%[1]s" + +#### macOS: + + %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenZshCompletionNoDesc(out) + } + return cmd.Root().GenZshCompletion(out) + }, + } + if haveNoDescFlag { + zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + fish := &Command{ + Use: "fish", + Short: fmt.Sprintf(shortDesc, "fish"), + Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + %[1]s completion fish | source + +To load completions for every new session, execute once: + + %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenFishCompletion(out, !noDesc) + }, + } + if haveNoDescFlag { + fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + powershell := &Command{ + Use: "powershell", + Short: fmt.Sprintf(shortDesc, "powershell"), + Long: fmt.Sprintf(`Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + %[1]s completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenPowerShellCompletion(out) + } + return cmd.Root().GenPowerShellCompletionWithDesc(out) + + }, + } + if haveNoDescFlag { + powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + completionCmd.AddCommand(bash, zsh, fish, powershell) +} + func findFlag(cmd *Command, name string) *pflag.Flag { flagSet := cmd.Flags() if len(name) == 1 { diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 3e112347..bb57fd56 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -21,44 +21,47 @@ func genFishComp(buf io.StringWriter, name string, includeDesc bool) { WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) WriteStringAndCheck(buf, fmt.Sprintf(` function __%[1]s_debug - set file "$BASH_COMP_DEBUG_FILE" + set -l file "$BASH_COMP_DEBUG_FILE" if test -n "$file" echo "$argv" >> $file end end function __%[1]s_perform_completion - __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv" + __%[1]s_debug "Starting __%[1]s_perform_completion" - set args (string split -- " " "$argv") - set lastArg "$args[-1]" + # Extract all args except the last one + set -l args (commandline -opc) + # Extract the last arg and escape it in case it is a space + set -l lastArg (string escape -- (commandline -ct)) __%[1]s_debug "args: $args" __%[1]s_debug "last arg: $lastArg" - set emptyArg "" - if test -z "$lastArg" - __%[1]s_debug "Setting emptyArg" - set emptyArg \"\" - end - __%[1]s_debug "emptyArg: $emptyArg" - - if not type -q "$args[1]" - # This can happen when "complete --do-complete %[2]s" is called when running this script. - __%[1]s_debug "Cannot find $args[1]. No completions." - return - end + set -l requestComp "$args[1] %[3]s $args[2..-1] $lastArg" - set requestComp "$args[1] %[3]s $args[2..-1] $emptyArg" __%[1]s_debug "Calling $requestComp" + set -l results (eval $requestComp 2> /dev/null) + + # Some programs may output extra empty lines after the directive. + # Let's ignore them or else it will break completion. 
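Stepping outside the patch once more: an assumed usage sketch of the new `CompletionOptions` knobs and of the default `completion` command that `initDefaultCompletionCmd` registers for any root command with sub-commands (the `mycli` program name is hypothetical).

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "mycli"}
	rootCmd.AddCommand(&cobra.Command{
		Use: "version",
		Run: func(cmd *cobra.Command, args []string) { cmd.Println("0.1.0") },
	})

	// Because rootCmd has sub-commands and the feature is not disabled, cobra
	// will register `mycli completion bash|zsh|fish|powershell` automatically.
	// The options below keep the command but hide it from help output and
	// drop completion descriptions for shells that support them.
	rootCmd.CompletionOptions = cobra.CompletionOptions{
		DisableDefaultCmd:   false,
		HiddenDefaultCmd:    true,
		DisableDescriptions: true,
	}

	_ = rootCmd.Execute()
}
```

With that, `mycli completion fish | source` (as described in the generated help text above) would load completions for the current fish session.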
+ # Ref: https://github.com/spf13/cobra/issues/1279 + for line in $results[-1..1] + if test (string trim -- $line) = "" + # Found an empty line, remove it + set results $results[1..-2] + else + # Found non-empty line, we have our proper output + break + end + end - set results (eval $requestComp 2> /dev/null) - set comps $results[1..-2] - set directiveLine $results[-1] + set -l comps $results[1..-2] + set -l directiveLine $results[-1] # For Fish, when completing a flag with an = (e.g., -n=) # completions must be prefixed with the flag - set flagPrefix (string match -r -- '-.*=' "$lastArg") + set -l flagPrefix (string match -r -- '-.*=' "$lastArg") __%[1]s_debug "Comps: $comps" __%[1]s_debug "DirectiveLine: $directiveLine" @@ -71,115 +74,124 @@ function __%[1]s_perform_completion printf "%%s\n" "$directiveLine" end -# This function does three things: -# 1- Obtain the completions and store them in the global __%[1]s_comp_results -# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed -# and unset it otherwise -# 3- Return true if the completion results are not empty +# This function does two things: +# - Obtain the completions and store them in the global __%[1]s_comp_results +# - Return false if file completion should be performed function __%[1]s_prepare_completions + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + # Start fresh - set --erase __%[1]s_comp_do_file_comp set --erase __%[1]s_comp_results - # Check if the command-line is already provided. This is useful for testing. - if not set --query __%[1]s_comp_commandLine - # Use the -c flag to allow for completion in the middle of the line - set __%[1]s_comp_commandLine (commandline -c) - end - __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine" - - set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine") - set --erase __%[1]s_comp_commandLine + set -l results (__%[1]s_perform_completion) __%[1]s_debug "Completion results: $results" if test -z "$results" __%[1]s_debug "No completion, probably due to a failure" # Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set directive (string sub --start 2 $results[-1]) + set -l directive (string sub --start 2 $results[-1]) set --global __%[1]s_comp_results $results[1..-2] __%[1]s_debug "Completions are: $__%[1]s_comp_results" __%[1]s_debug "Directive is: $directive" - set shellCompDirectiveError %[4]d - set shellCompDirectiveNoSpace %[5]d - set shellCompDirectiveNoFileComp %[6]d - set shellCompDirectiveFilterFileExt %[7]d - set shellCompDirectiveFilterDirs %[8]d + set -l shellCompDirectiveError %[4]d + set -l shellCompDirectiveNoSpace %[5]d + set -l shellCompDirectiveNoFileComp %[6]d + set -l shellCompDirectiveFilterFileExt %[7]d + set -l shellCompDirectiveFilterDirs %[8]d if test -z "$directive" set directive 0 end - set compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) + set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) if test $compErr -eq 1 __%[1]s_debug "Received error directive: aborting." 
# Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) - set dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) + set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) + set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) if test $filefilter -eq 1; or test $dirfilter -eq 1 __%[1]s_debug "File extension filtering or directory filtering not supported" # Do full file completion instead - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) - set nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) + set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) + set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" - # Important not to quote the variable for count to work - set numComps (count $__%[1]s_comp_results) - __%[1]s_debug "numComps: $numComps" - - if test $numComps -eq 1; and test $nospace -ne 0 - # To support the "nospace" directive we trick the shell - # by outputting an extra, longer completion. - __%[1]s_debug "Adding second completion to perform nospace directive" - set --append __%[1]s_comp_results $__%[1]s_comp_results[1]. - end - - if test $numComps -eq 0; and test $nofiles -eq 0 - __%[1]s_debug "Requesting file completion" - set --global __%[1]s_comp_do_file_comp 1 + # If we want to prevent a space, or if file completion is NOT disabled, + # we need to count the number of valid completions. + # To do so, we will filter on prefix as the completions we have received + # may not already be filtered so as to allow fish to match on different + # criteria than the prefix. + if test $nospace -ne 0; or test $nofiles -eq 0 + set -l prefix (commandline -t | string escape --style=regex) + __%[1]s_debug "prefix: $prefix" + + set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) + set --global __%[1]s_comp_results $completions + __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" + + # Important not to quote the variable for count to work + set -l numComps (count $__%[1]s_comp_results) + __%[1]s_debug "numComps: $numComps" + + if test $numComps -eq 1; and test $nospace -ne 0 + # We must first split on \t to get rid of the descriptions to be + # able to check what the actual completion will be. + # We don't need descriptions anyway since there is only a single + # real completion which the shell will expand immediately. + set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) + + # Fish won't add a space if the completion ends with any + # of the following characters: @=/:., + set -l lastChar (string sub -s -1 -- $split) + if not string match -r -q "[@=/:.,]" -- "$lastChar" + # In other cases, to support the "nospace" directive we trick the shell + # by outputting an extra, longer completion. + __%[1]s_debug "Adding second completion to perform nospace directive" + set --global __%[1]s_comp_results $split[1] $split[1]. 
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results" + end + end + + if test $numComps -eq 0; and test $nofiles -eq 0 + # To be consistent with bash and zsh, we only trigger file + # completion when there are no other completions + __%[1]s_debug "Requesting file completion" + return 1 + end end - # If we don't want file completion, we must return true even if there - # are no completions found. This is because fish will perform the last - # completion command, even if its condition is false, if no other - # completion command was triggered - return (not set --query __%[1]s_comp_do_file_comp) + return 0 end # Since Fish completions are only loaded once the user triggers them, we trigger them ourselves # so we can properly delete any completions provided by another script. -# The space after the the program name is essential to trigger completion for the program -# and not completion of the program name itself. -complete --do-complete "%[2]s " > /dev/null 2>&1 -# Using '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. +# Only do this if the program can be found, or else fish may print some errors; besides, +# the existing completions will only be loaded if the program can be found. +if type -q "%[2]s" + # The space after the program name is essential to trigger completion for the program + # and not completion of the program name itself. + # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. + complete --do-complete "%[2]s " > /dev/null 2>&1 +end # Remove any pre-existing completions for the program since we will be handling all of them. complete -c %[2]s -e -# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions -# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable. -# -# This completion will be run second as complete commands are added FILO. -# It triggers file completion choices when __%[1]s_comp_do_file_comp is set. -complete -c %[2]s -n 'set --query __%[1]s_comp_do_file_comp' - -# This completion will be run first as complete commands are added FILO. -# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp. -# It provides the program's completion choices. +# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results +# which provides the program's completion choices. 
complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' `, nameForVar, name, compCmd, diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod deleted file mode 100644 index ff561440..00000000 --- a/vendor/github.com/spf13/cobra/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/spf13/cobra - -go 1.12 - -require ( - github.com/cpuguy83/go-md2man/v2 v2.0.0 - github.com/inconshreveable/mousetrap v1.0.0 - github.com/mitchellh/go-homedir v1.1.0 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.7.0 - gopkg.in/yaml.v2 v2.4.0 -) diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum deleted file mode 100644 index 9328ee3e..00000000 --- a/vendor/github.com/spf13/cobra/go.sum +++ /dev/null @@ -1,313 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible 
h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0 
h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index c55be71c..62d719f0 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -50,7 +50,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { if ($Command.Length -gt $CursorPosition) { $Command=$Command.Substring(0,$CursorPosition) } - __%[1]s_debug "Truncated command: $Command" + __%[1]s_debug "Truncated command: $Command" $ShellCompDirectiveError=%[3]d $ShellCompDirectiveNoSpace=%[4]d @@ -58,7 +58,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { $ShellCompDirectiveFilterFileExt=%[6]d $ShellCompDirectiveFilterDirs=%[7]d - # Prepare the command to request completions for the program. + # Prepare the command to request completions for the program. # Split the command at the first space to separate the program and arguments. $Program,$Arguments = $Command.Split(" ",2) $RequestComp="$Program %[2]s $Arguments" @@ -86,7 +86,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { # We add an extra empty parameter so we can indicate this to the go method. __%[1]s_debug "Adding extra empty parameter" `+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+` +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` } __%[1]s_debug "Calling $RequestComp" @@ -140,19 +140,6 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { $Space = "" } - if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { - __%[1]s_debug "ShellCompDirectiveNoFileComp is called" - - if ($Values.Length -eq 0) { - # Just print an empty string here so the - # shell does not start to complete paths. - # We cannot use CompletionResult here because - # it does not accept an empty string as argument. - "" - return - } - } - if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" @@ -165,20 +152,33 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { # filter the result $_.Name -like "$WordToComplete*" - # Join the flag back if we have a equal sign flag + # Join the flag back if we have an equal sign flag if ( $IsEqualFlag ) { __%[1]s_debug "Join the equal sign flag back to the completion value" $_.Name = $Flag + "=" + $_.Name } } + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { + __%[1]s_debug "ShellCompDirectiveNoFileComp is called" + + if ($Values.Length -eq 0) { + # Just print an empty string here so the + # shell does not start to complete paths. + # We cannot use CompletionResult here because + # it does not accept an empty string as argument. 
+ "" + return + } + } + # Get the current mode $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function __%[1]s_debug "Mode: $Mode" $Values | ForEach-Object { - # store temporay because switch will overwrite $_ + # store temporary because switch will overwrite $_ $comp = $_ # PowerShell supports three different completion modes @@ -233,7 +233,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { Default { # Like MenuComplete but we don't want to add a space here because # the user need to press space anyway to get the completion. - # Description will not be shown because thats not possible with TabCompleteNext + # Description will not be shown because that's not possible with TabCompleteNext [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") } } diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md index d98a71e3..8410c993 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -4,6 +4,7 @@ - [Bleve](http://www.blevesearch.com/) - [CockroachDB](http://www.cockroachlabs.com/) - [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) +- [Datree](https://github.com/datreeio/datree) - [Delve](https://github.com/derekparker/delve) - [Docker (distribution)](https://github.com/docker/distribution) - [Etcd](https://etcd.io/) @@ -14,25 +15,36 @@ - [GitHub Labeler](https://github.com/erdaltsksn/gh-label) - [Golangci-lint](https://golangci-lint.run) - [GopherJS](http://www.gopherjs.org/) +- [GoReleaser](https://goreleaser.com) - [Helm](https://helm.sh) - [Hugo](https://gohugo.io) +- [Infracost](https://github.com/infracost/infracost) - [Istio](https://istio.io) - [Kool](https://github.com/kool-dev/kool) - [Kubernetes](http://kubernetes.io/) - [Linkerd](https://linkerd.io/) - [Mattermost-server](https://github.com/mattermost/mattermost-server) +- [Mercure](https://mercure.rocks/) +- [Meroxa CLI](https://github.com/meroxa/cli) - [Metal Stack CLI](https://github.com/metal-stack/metalctl) - [Moby (former Docker)](https://github.com/moby/moby) +- [Moldy](https://github.com/Moldy-Community/moldy) +- [Multi-gitter](https://github.com/lindell/multi-gitter) - [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +- [nFPM](https://nfpm.goreleaser.com) - [OpenShift](https://www.openshift.com/) - [Ory Hydra](https://github.com/ory/hydra) - [Ory Kratos](https://github.com/ory/kratos) - [Pouch](https://github.com/alibaba/pouch) - [ProjectAtomic (enterprise)](http://www.projectatomic.io/) - [Prototool](https://github.com/uber/prototool) +- [QRcp](https://github.com/claudiodangelis/qrcp) - [Random](https://github.com/erdaltsksn/random) - [Rclone](https://rclone.org/) +- [Scaleway CLI](https://github.com/scaleway/scaleway-cli) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) - [Twitch CLI](https://github.com/twitchdev/twitch-cli) +- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) +- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) - [Werf](https://werf.io/) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index cd533ac3..03add869 100644 --- 
a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -7,6 +7,15 @@ The currently supported shells are: - fish - PowerShell +Cobra will automatically provide your program with a fully functional `completion` command, +similarly to how it provides the `help` command. + +## Creating your own completion command + +If you do not wish to use the default `completion` command, you can choose to +provide your own, which will take precedence over the default one. (This also provides +backwards-compatibility with programs that already have their own `completion` command.) + If you are using the generator, you can create a completion command by running ```bash @@ -19,17 +28,17 @@ and then modifying the generated `cmd/completion.go` file to look something like var completionCmd = &cobra.Command{ Use: "completion [bash|zsh|fish|powershell]", Short: "Generate completion script", - Long: `To load completions: + Long: fmt.Sprintf(`To load completions: Bash: - $ source <(yourprogram completion bash) + $ source <(%[1]s completion bash) # To load completions for each session, execute once: # Linux: - $ yourprogram completion bash > /etc/bash_completion.d/yourprogram + $ %[1]s completion bash > /etc/bash_completion.d/%[1]s # macOS: - $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram + $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s Zsh: @@ -39,25 +48,25 @@ Zsh: $ echo "autoload -U compinit; compinit" >> ~/.zshrc # To load completions for each session, execute once: - $ yourprogram completion zsh > "${fpath[1]}/_yourprogram" + $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" # You will need to start a new shell for this setup to take effect. fish: - $ yourprogram completion fish | source + $ %[1]s completion fish | source # To load completions for each session, execute once: - $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish + $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish PowerShell: - PS> yourprogram completion powershell | Out-String | Invoke-Expression + PS> %[1]s completion powershell | Out-String | Invoke-Expression # To load completions for every new session, run: - PS> yourprogram completion powershell > yourprogram.ps1 + PS> %[1]s completion powershell > %[1]s.ps1 # and source this file from your PowerShell profile. -`, +`,cmd.Root().Name()), DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, Args: cobra.ExactValidArgs(1), @@ -70,7 +79,7 @@ PowerShell: case "fish": cmd.Root().GenFishCompletion(os.Stdout, true) case "powershell": - cmd.Root().GenPowerShellCompletion(os.Stdout) + cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) } }, } @@ -78,6 +87,26 @@ PowerShell: **Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. +## Adapting the default completion command + +Cobra provides a few options for the default `completion` command. To configure such options you must set +the `CompletionOptions` field on the *root* command. 
+ +To tell Cobra *not* to provide the default `completion` command: +``` +rootCmd.CompletionOptions.DisableDefaultCmd = true +``` + +To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: +``` +rootCmd.CompletionOptions.DisableNoDescFlag = true +``` + +To tell Cobra to completely disable descriptions for completions: +``` +rootCmd.CompletionOptions.DisableDescriptions = true +``` + # Customizing completions The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. @@ -323,7 +352,10 @@ cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, ``` ### Descriptions for completions -`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh: +Cobra provides support for completion descriptions. Such descriptions are supported for each shell +(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). +For commands and flags, Cobra will provide the descriptions automatically, based on usage information. +For example, using zsh: ``` $ helm s[tab] search -- search for a keyword in charts @@ -336,7 +368,7 @@ $ helm s[tab] search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) ``` -Cobra allows you to add annotations to your own completions. Simply add the annotation text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: +Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: ```go ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp @@ -371,6 +403,37 @@ completion firstcommand secondcommand For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. Please refer to [Bash Completions](bash_completions.md) for details. +### Bash completion V2 + +Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling +`GenBashCompletion()` or `GenBashCompletionFile()`. + +A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or +`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion +(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion +solution described in this document. 
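As a rough illustration (an editorial sketch, not part of this changeset), a custom `completion` command could emit the V2 script by calling `GenBashCompletionV2` on the root command; the `cmd` package and `completionCmd` below are hypothetical names:

```go
package cmd

import (
	"os"

	"github.com/spf13/cobra"
)

// completionCmd is a hypothetical custom completion command that writes the
// bash V2 script to stdout; the boolean argument controls whether the
// generated completions carry descriptions.
var completionCmd = &cobra.Command{
	Use:                   "completion",
	Short:                 "Generate the bash completion V2 script",
	DisableFlagsInUseLine: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return cmd.Root().GenBashCompletionV2(os.Stdout, true)
	},
}
```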
+Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash +completion V2 solution which provides the following extra features: +- Supports completion descriptions (like the other shells) +- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) +- Streamlined user experience thanks to a completion behavior aligned with the other shells + +`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` +you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra +will provide the description automatically based on usage information. You can choose to make this option configurable by +your users. + +``` +# With descriptions +$ helm s[tab][tab] +search (search for a keyword in charts) status (display the status of the named release) +show (show information of a chart) + +# Without descriptions +$ helm s[tab][tab] +search show status +``` +**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command. ## Zsh completions Cobra supports native zsh completion generated from the root `cobra.Command`. diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md new file mode 100644 index 00000000..e87cdf21 --- /dev/null +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -0,0 +1,638 @@ +# User Guide + +While you are welcome to provide your own organization, typically a Cobra-based +application will follow the following organizational structure: + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +For complete details on using the Cobra generator, please read [The Cobra Generator README](https://github.com/spf13/cobra/blob/master/cobra/README.md) + +## Using the Cobra Library + +To manually implement Cobra you need to create a bare main.go file and a rootCmd file. +You will optionally provide additional commands as you see fit. + +### Create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var rootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} +``` + +You will additionally define flags and handle configuration in your init() function. + +For example cmd/root.go: + +```go +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + // Used for flags. 
+ cfgFile string + userLicense string + + rootCmd = &cobra.Command{ + Use: "cobra", + Short: "A generator for Cobra based Applications", + Long: `Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + } +) + +// Execute executes the root command. +func Execute() error { + return rootCmd.Execute() +} + +func init() { + cobra.OnInitialize(initConfig) + + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") + rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") + + rootCmd.AddCommand(addCmd) + rootCmd.AddCommand(initCmd) +} + +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := os.UserHomeDir() + cobra.CheckErr(err) + + // Search config in home directory with name ".cobra" (without extension). + viper.AddConfigPath(home) + viper.SetConfigType("yaml") + viper.SetConfigName(".cobra") + } + + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Returning and handling errors + +If you wish to return an error to the caller of a command, `RunE` can be used. + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(tryCmd) +} + +var tryCmd = &cobra.Command{ + Use: "try", + Short: "Try and possibly fail at something", + RunE: func(cmd *cobra.Command, args []string) error { + if err := someFunc(); err != nil { + return err + } + return nil + }, +} +``` + +The error can then be caught at the execute function call. + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. 
+ +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent', meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally, which will only apply to that specific command. + +```go +localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + +### Local Flag on Parent Commands + +By default, Cobra only parses local flags on the target command, and any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will +parse local flags on each command before executing the target command. + +```go +command := cobra.Command{ + Use: "print [OPTIONS] [COMMANDS]", + TraverseChildren: true, +} +``` + +### Bind Flags with Config + +You can also bind your flags with [viper](https://github.com/spf13/viper): +```go +var author string + +func init() { + rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) +} +``` + +In this example, the persistent flag `author` is bound with `viper`. +**Note**: the variable `author` will not be set to the value from config, +when the `--author` flag is provided by user. + +More in [viper documentation](https://github.com/spf13/viper#working-with-flags). + +### Required flags + +Flags are optional by default. If instead you wish your command to report an error +when a flag has not been set, mark it as required: +```go +rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkFlagRequired("region") +``` + +Or, for persistent flags: +```go +rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkPersistentFlagRequired("region") +``` + +## Positional and Custom Arguments + +Validation of positional arguments can be specified using the `Args` field +of `Command`. + +The following validators are built in: + +- `NoArgs` - the command will report an error if there are any positional args. +- `ArbitraryArgs` - the command will accept any args. +- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. +- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. +- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. +- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. +- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` +- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. +- `MatchAll(pargs ...PositionalArgs)` - enables combining existing checks with arbitrary other checks (e.g. you want to check the ExactArgs length along with other qualities). 
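For instance (an illustrative sketch, not part of this changeset), a built-in validator is attached directly through the `Args` field; `cloneCmd` is a hypothetical command that requires exactly one positional argument:

```go
package cmd

import (
	"fmt"

	"github.com/spf13/cobra"
)

// cloneCmd is a hypothetical command; cobra.ExactArgs(1) makes Cobra report
// an error unless exactly one positional argument is supplied.
var cloneCmd = &cobra.Command{
	Use:   "clone [repository]",
	Short: "Clone a single repository",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("cloning", args[0])
	},
}
```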
+ +An example of setting the custom validator: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("requires a color argument") + } + if myapp.IsValidColor(args[0]) { + return nil + } + return fmt.Errorf("invalid color specified: %s", args[0]) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable, meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. +For many years people have printed back to the screen.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. +Echo works a lot like print, except it has a child command.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Echo: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing +a count and a string.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + $ cobra help + + Cobra is a CLI library for Go that empowers applications. + This application is a tool to generate the needed files + to quickly create a Cobra application. 
+ + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use +with following functions: + +```go +cmd.SetHelpCommand(cmd *Command) +cmd.SetHelpFunc(f func(*Command, []string)) +cmd.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage Message + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + $ cobra --invalid + Error: unknown flag: --invalid + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. +Like help, the function and template are overridable through public methods: + +```go +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) +``` + +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + +## PreRun and PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + rootCmd.Execute() + fmt.Println() + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + rootCmd.Execute() +} +``` + +Output: +``` +Inside rootCmd PersistentPreRun with args: [] +Inside rootCmd PreRun with args: [] +Inside rootCmd Run with args: [] +Inside rootCmd PostRun with args: [] +Inside rootCmd PersistentPostRun with args: [] + +Inside rootCmd PersistentPreRun with args: [arg1 arg2] +Inside subCmd PreRun with args: [arg1 arg2] +Inside subCmd Run with args: [arg1 arg2] +Inside subCmd PostRun with args: [arg1 arg2] +Inside subCmd PersistentPostRun with args: [arg1 arg2] +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. + +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating documentation for your command + +Cobra can generate documentation based on subcommands, flags, etc. 
Read more about it in the [docs generation documentation](doc/README.md). + +## Generating shell completions + +Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 2e840285..624adab5 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -95,7 +95,7 @@ _%[1]s() local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d - local lastParam lastChar flagPrefix requestComp out directive compCount comp lastComp + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace local -a completions __%[1]s_debug "\n========= starting completion logic ==========" @@ -163,7 +163,6 @@ _%[1]s() return fi - compCount=0 while IFS='\n' read -r comp; do if [ -n "$comp" ]; then # If requested, completions are returned with a description. @@ -175,13 +174,17 @@ _%[1]s() local tab=$(printf '\t') comp=${comp//$tab/:} - ((compCount++)) __%[1]s_debug "Adding completion: ${comp}" completions+=${comp} lastComp=$comp fi done < <(printf "%%s\n" "${out[@]}") + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + __%[1]s_debug "Activating nospace." + noSpace="-S ''" + fi + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd @@ -199,7 +202,7 @@ _%[1]s() _arguments '*:filename:'"$filteringCmd" elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then # File completion for directories only - local subDir + local subdir subdir="${completions[1]}" if [ -n "$subdir" ]; then __%[1]s_debug "Listing directories in $subdir" @@ -208,31 +211,46 @@ _%[1]s() __%[1]s_debug "Listing directories in ." fi + local result _arguments '*:dirname:_files -/'" ${flagPrefix}" + result=$? if [ -n "$subdir" ]; then popd >/dev/null 2>&1 fi - elif [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ] && [ ${compCount} -eq 1 ]; then - __%[1]s_debug "Activating nospace." - # We can use compadd here as there is no description when - # there is only one completion. - compadd -S '' "${lastComp}" - elif [ ${compCount} -eq 0 ]; then - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - __%[1]s_debug "deactivating file completion" + return $result + else + __%[1]s_debug "Calling _describe" + if eval _describe "completions" completions $flagPrefix $noSpace; then + __%[1]s_debug "_describe found some completions" + + # Return the success of having called _describe + return 0 else - # Perform file completion - __%[1]s_debug "activating file completion" - _arguments '*:filename:_files'" ${flagPrefix}" + __%[1]s_debug "_describe did not find completions." + __%[1]s_debug "Checking if we should do file completion." + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + __%[1]s_debug "deactivating file completion" + + # We must return an error code here to let zsh know that there were no + # completions found by _describe; this is what will trigger other + # matching algorithms to attempt to find completions. + # For example zsh can match letters in the middle of words. 
+ return 1 + else + # Perform file completion + __%[1]s_debug "Activating file completion" + + # We must return the result of this command, so it must be the + # last command, or else we must store its result to return it. + _arguments '*:filename:_files'" ${flagPrefix}" + fi fi - else - _describe "completions" completions $(echo $flagPrefix) fi } # don't run the completion function when being source-ed or eval-ed if [ "$funcstack[1]" = "_%[1]s" ]; then - _%[1]s + _%[1]s fi `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, diff --git a/vendor/github.com/spf13/jwalterweatherman/go.mod b/vendor/github.com/spf13/jwalterweatherman/go.mod deleted file mode 100644 index 1dbcfd3e..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/spf13/jwalterweatherman - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 -) diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod deleted file mode 100644 index b2287eec..00000000 --- a/vendor/github.com/spf13/pflag/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/pflag - -go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum deleted file mode 100644 index e69de29b..00000000 diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml index 4f970acb..52e77eef 100644 --- a/vendor/github.com/spf13/viper/.golangci.yml +++ b/vendor/github.com/spf13/viper/.golangci.yml @@ -20,7 +20,6 @@ linters: - exhaustive - exportloopref - gci - - goconst - gofmt - gofumpt - goimports @@ -62,6 +61,7 @@ linters: # - gochecknoglobals # - gochecknoinits # - gocognit + # - goconst # - gocritic # - gocyclo # - godot diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile index b0f9acf2..1279096f 100644 --- a/vendor/github.com/spf13/viper/Makefile +++ b/vendor/github.com/spf13/viper/Makefile @@ -15,8 +15,8 @@ TEST_FORMAT = short-verbose endif # Dependency versions -GOTESTSUM_VERSION = 1.6.4 -GOLANGCI_VERSION = 1.40.1 +GOTESTSUM_VERSION = 1.7.0 +GOLANGCI_VERSION = 1.43.0 # Add the ability to override some variables # Use with care diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 82bff12e..9712e705 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -119,7 +119,7 @@ viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search viper.AddConfigPath(".") // optionally look for config in the working directory err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %s \n", err)) + panic(fmt.Errorf("Fatal error config file: %w \n", err)) } ``` @@ -127,11 +127,11 @@ You can handle the specific case where no config file is found like this: ```go if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error if desired + } else { + // Config file was found but another error was produced + } } // Config file found and successfully parsed @@ -175,10 
+175,10 @@ Optionally you can provide a function for Viper to run each time a change occurs **Make sure you add all of the configPaths prior to calling `WatchConfig()`** ```go -viper.WatchConfig() viper.OnConfigChange(func(e fsnotify.Event) { fmt.Println("Config file changed:", e.Name) }) +viper.WatchConfig() ``` ### Reading Config from io.Reader @@ -354,7 +354,7 @@ func main() { i := viper.GetInt("flagname") // retrieve value from viper - ... + // ... } ``` @@ -503,18 +503,18 @@ runtime_viper.Unmarshal(&runtime_conf) // open a goroutine to watch remote changes forever go func(){ for { - time.Sleep(time.Second * 5) // delay after each request - - // currently, only tested with etcd support - err := runtime_viper.WatchRemoteConfig() - if err != nil { - log.Errorf("unable to read remote config: %v", err) - continue - } - - // unmarshal new config into our runtime config struct. you can also use channel - // to implement a signal to notify the system of the changes - runtime_viper.Unmarshal(&runtime_conf) + time.Sleep(time.Second * 5) // delay after each request + + // currently, only tested with etcd support + err := runtime_viper.WatchRemoteConfig() + if err != nil { + log.Errorf("unable to read remote config: %v", err) + continue + } + + // unmarshal new config into our runtime config struct. you can also use channel + // to implement a signal to notify the system of the changes + runtime_viper.Unmarshal(&runtime_conf) } }() ``` @@ -546,7 +546,7 @@ Example: ```go viper.GetString("logfile") // case-insensitive Setting & Getting if viper.GetBool("verbose") { - fmt.Println("verbose enabled") + fmt.Println("verbose enabled") } ``` ### Accessing nested keys @@ -669,7 +669,7 @@ So instead of doing that let's pass a Viper instance to the constructor that rep ```go cache1Config := viper.Sub("cache.cache1") if cache1Config == nil { // Sub returns nil if the key cannot be found - panic("cache configuration not found") + panic("cache configuration not found") } cache1 := NewCache(cache1Config) @@ -681,10 +681,10 @@ Internally, the `NewCache` function can address `max-items` and `item-size` keys ```go func NewCache(v *Viper) *Cache { - return &Cache{ - MaxItems: v.GetInt("max-items"), - ItemSize: v.GetInt("item-size"), - } + return &Cache{ + MaxItems: v.GetInt("max-items"), + ItemSize: v.GetInt("item-size"), + } } ``` @@ -726,18 +726,18 @@ you have to change the delimiter: v := viper.NewWithOptions(viper.KeyDelimiter("::")) v.SetDefault("chart::values", map[string]interface{}{ - "ingress": map[string]interface{}{ - "annotations": map[string]interface{}{ - "traefik.frontend.rule.type": "PathPrefix", - "traefik.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, + "ingress": map[string]interface{}{ + "annotations": map[string]interface{}{ + "traefik.frontend.rule.type": "PathPrefix", + "traefik.ingress.kubernetes.io/ssl-redirect": "true", + }, + }, }) type config struct { Chart struct{ - Values map[string]interface{} - } + Values map[string]interface{} + } } var C config @@ -778,6 +778,15 @@ if err != nil { Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +### Decoding custom formats + +A frequently requested feature for Viper is adding more value formats and decoders. +For example, parsing character (dot, comma, semicolon, etc) separated strings into slices. + +This is already available in Viper using mapstructure decode hooks. 
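As a hedged sketch of how such a hook is wired up (an editorial example, not part of this changeset; the `config` struct and the semicolon-separated value are made up), a `mapstructure` decode hook can be passed to `Unmarshal` through `viper.DecodeHook`:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
)

type config struct {
	Hosts []string `mapstructure:"hosts"`
}

func main() {
	v := viper.New()
	// The value arrives as a single semicolon-separated string.
	v.Set("hosts", "a.example.com;b.example.com")

	var c config
	// StringToSliceHookFunc(";") splits the string into a []string
	// while decoding into the target struct.
	if err := v.Unmarshal(&c, viper.DecodeHook(
		mapstructure.StringToSliceHookFunc(";"),
	)); err != nil {
		panic(err)
	}

	fmt.Println(c.Hosts) // [a.example.com b.example.com]
}
```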
+ +Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/). + ### Marshalling to string You may need to marshal all the settings held in viper into a string rather than write them to a file. @@ -785,17 +794,17 @@ You can use your favorite format's marshaller with the config returned by `AllSe ```go import ( - yaml "gopkg.in/yaml.v2" - // ... + yaml "gopkg.in/yaml.v2" + // ... ) func yamlStringSettings() string { - c := viper.AllSettings() - bs, err := yaml.Marshal(c) - if err != nil { - log.Fatalf("unable to marshal config to YAML: %v", err) - } - return string(bs) + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + log.Fatalf("unable to marshal config to YAML: %v", err) + } + return string(bs) } ``` diff --git a/vendor/github.com/spf13/viper/fs.go b/vendor/github.com/spf13/viper/fs.go new file mode 100644 index 00000000..ecb1769e --- /dev/null +++ b/vendor/github.com/spf13/viper/fs.go @@ -0,0 +1,65 @@ +//go:build go1.16 && finder +// +build go1.16,finder + +package viper + +import ( + "errors" + "io/fs" + "path" +) + +type finder struct { + paths []string + fileNames []string + extensions []string + + withoutExtension bool +} + +func (f finder) Find(fsys fs.FS) (string, error) { + for _, searchPath := range f.paths { + for _, fileName := range f.fileNames { + for _, extension := range f.extensions { + filePath := path.Join(searchPath, fileName+"."+extension) + + ok, err := fileExists(fsys, filePath) + if err != nil { + return "", err + } + + if ok { + return filePath, nil + } + } + + if f.withoutExtension { + filePath := path.Join(searchPath, fileName) + + ok, err := fileExists(fsys, filePath) + if err != nil { + return "", err + } + + if ok { + return filePath, nil + } + } + } + } + + return "", nil +} + +func fileExists(fsys fs.FS, filePath string) (bool, error) { + fileInfo, err := fs.Stat(fsys, filePath) + if err == nil { + return !fileInfo.IsDir(), nil + } + + if errors.Is(err, fs.ErrNotExist) { + return false, nil + } + + return false, err +} diff --git a/vendor/github.com/spf13/viper/go.mod b/vendor/github.com/spf13/viper/go.mod deleted file mode 100644 index 145e0a10..00000000 --- a/vendor/github.com/spf13/viper/go.mod +++ /dev/null @@ -1,21 +0,0 @@ -module github.com/spf13/viper - -go 1.12 - -require ( - github.com/bketelsen/crypt v0.0.4 - github.com/fsnotify/fsnotify v1.4.9 - github.com/hashicorp/hcl v1.0.0 - github.com/magiconair/properties v1.8.5 - github.com/mitchellh/mapstructure v1.4.1 - github.com/pelletier/go-toml v1.9.3 - github.com/smartystreets/goconvey v1.6.4 // indirect - github.com/spf13/afero v1.6.0 - github.com/spf13/cast v1.3.1 - github.com/spf13/jwalterweatherman v1.1.0 - github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.7.0 - github.com/subosito/gotenv v1.2.0 - gopkg.in/ini.v1 v1.62.0 - gopkg.in/yaml.v2 v2.4.0 -) diff --git a/vendor/github.com/spf13/viper/go.sum b/vendor/github.com/spf13/viper/go.sum deleted file mode 100644 index 27730e2a..00000000 --- a/vendor/github.com/spf13/viper/go.sum +++ /dev/null @@ -1,632 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go 
v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0 h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA= 
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= 
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= 
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go new file mode 100644 index 00000000..08b1bb66 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/decoder.go @@ -0,0 +1,61 @@ +package encoding + +import ( + "sync" +) + +// Decoder decodes the contents of b into a v representation. +// It's primarily used for decoding contents of a file into a map[string]interface{}. +type Decoder interface { + Decode(b []byte, v interface{}) error +} + +const ( + // ErrDecoderNotFound is returned when there is no decoder registered for a format. + ErrDecoderNotFound = encodingError("decoder not found for this format") + + // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. + ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") +) + +// DecoderRegistry can choose an appropriate Decoder based on the provided format. 
+type DecoderRegistry struct { + decoders map[string]Decoder + + mu sync.RWMutex +} + +// NewDecoderRegistry returns a new, initialized DecoderRegistry. +func NewDecoderRegistry() *DecoderRegistry { + return &DecoderRegistry{ + decoders: make(map[string]Decoder), + } +} + +// RegisterDecoder registers a Decoder for a format. +// Registering a Decoder for an already existing format is not supported. +func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.decoders[format]; ok { + return ErrDecoderFormatAlreadyRegistered + } + + e.decoders[format] = enc + + return nil +} + +// Decode calls the underlying Decoder based on the format. +func (e *DecoderRegistry) Decode(format string, b []byte, v interface{}) error { + e.mu.RLock() + decoder, ok := e.decoders[format] + e.mu.RUnlock() + + if !ok { + return ErrDecoderNotFound + } + + return decoder.Decode(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go new file mode 100644 index 00000000..82c7996c --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/encoder.go @@ -0,0 +1,60 @@ +package encoding + +import ( + "sync" +) + +// Encoder encodes the contents of v into a byte representation. +// It's primarily used for encoding a map[string]interface{} into a file format. +type Encoder interface { + Encode(v interface{}) ([]byte, error) +} + +const ( + // ErrEncoderNotFound is returned when there is no encoder registered for a format. + ErrEncoderNotFound = encodingError("encoder not found for this format") + + // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. + ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") +) + +// EncoderRegistry can choose an appropriate Encoder based on the provided format. +type EncoderRegistry struct { + encoders map[string]Encoder + + mu sync.RWMutex +} + +// NewEncoderRegistry returns a new, initialized EncoderRegistry. +func NewEncoderRegistry() *EncoderRegistry { + return &EncoderRegistry{ + encoders: make(map[string]Encoder), + } +} + +// RegisterEncoder registers an Encoder for a format. +// Registering a Encoder for an already existing format is not supported. 
+func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.encoders[format]; ok { + return ErrEncoderFormatAlreadyRegistered + } + + e.encoders[format] = enc + + return nil +} + +func (e *EncoderRegistry) Encode(format string, v interface{}) ([]byte, error) { + e.mu.RLock() + encoder, ok := e.encoders[format] + e.mu.RUnlock() + + if !ok { + return nil, ErrEncoderNotFound + } + + return encoder.Encode(v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go new file mode 100644 index 00000000..e4cde02d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/error.go @@ -0,0 +1,7 @@ +package encoding + +type encodingError string + +func (e encodingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go new file mode 100644 index 00000000..f3e4ab12 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "bytes" + "encoding/json" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. +// TODO: add printer config to the codec? +type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // TODO: use printer.Format? Is the trailing newline an issue? + + ast, err := hcl.Parse(string(b)) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + + err = printer.Fprint(&buf, ast.Node) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v interface{}) error { + return hcl.Unmarshal(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go new file mode 100644 index 00000000..dff9ec98 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go @@ -0,0 +1,17 @@ +package json + +import ( + "encoding/json" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. +type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + // TODO: expose prefix and indent in the Codec as setting? + return json.MarshalIndent(v, "", " ") +} + +func (Codec) Decode(b []byte, v interface{}) error { + return json.Unmarshal(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go new file mode 100644 index 00000000..c043802b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -0,0 +1,45 @@ +package toml + +import ( + "github.com/pelletier/go-toml" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. 
+type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + if m, ok := v.(map[string]interface{}); ok { + t, err := toml.TreeFromMap(m) + if err != nil { + return nil, err + } + + s, err := t.ToTomlString() + if err != nil { + return nil, err + } + + return []byte(s), nil + } + + return toml.Marshal(v) +} + +func (Codec) Decode(b []byte, v interface{}) error { + tree, err := toml.LoadBytes(b) + if err != nil { + return err + } + + if m, ok := v.(*map[string]interface{}); ok { + vmap := *m + tmap := tree.ToMap() + for k, v := range tmap { + vmap[k] = v + } + + return nil + } + + return tree.Unmarshal(v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go new file mode 100644 index 00000000..f94b2699 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -0,0 +1,14 @@ +package yaml + +import "gopkg.in/yaml.v2" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. +type Codec struct{} + +func (Codec) Encode(v interface{}) ([]byte, error) { + return yaml.Marshal(v) +} + +func (Codec) Decode(b []byte, v interface{}) error { + return yaml.Unmarshal(b, v) +} diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go new file mode 100644 index 00000000..0115067a --- /dev/null +++ b/vendor/github.com/spf13/viper/logger.go @@ -0,0 +1,77 @@ +package viper + +import ( + "fmt" + + jww "github.com/spf13/jwalterweatherman" +) + +// Logger is a unified interface for various logging use cases and practices, including: +// - leveled logging +// - structured logging +type Logger interface { + // Trace logs a Trace event. + // + // Even more fine-grained information than Debug events. + // Loggers not supporting this level should fall back to Debug. + Trace(msg string, keyvals ...interface{}) + + // Debug logs a Debug event. + // + // A verbose series of information events. + // They are useful when debugging the system. + Debug(msg string, keyvals ...interface{}) + + // Info logs an Info event. + // + // General information about what's happening inside the system. + Info(msg string, keyvals ...interface{}) + + // Warn logs a Warn(ing) event. + // + // Non-critical events that should be looked at. + Warn(msg string, keyvals ...interface{}) + + // Error logs an Error event. + // + // Critical events that require immediate attention. + // Loggers commonly provide Fatal and Panic levels above Error level, + // but exiting and panicing is out of scope for a logging library. 
+ Error(msg string, keyvals ...interface{}) +} + +type jwwLogger struct{} + +func (jwwLogger) Trace(msg string, keyvals ...interface{}) { + jww.TRACE.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Debug(msg string, keyvals ...interface{}) { + jww.DEBUG.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Info(msg string, keyvals ...interface{}) { + jww.INFO.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Warn(msg string, keyvals ...interface{}) { + jww.WARN.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Error(msg string, keyvals ...interface{}) { + jww.ERROR.Printf(jwwLogMessage(msg, keyvals...)) +} + +func jwwLogMessage(msg string, keyvals ...interface{}) string { + out := msg + + if len(keyvals) > 0 && len(keyvals)%2 == 1 { + keyvals = append(keyvals, nil) + } + + for i := 0; i <= len(keyvals)-2; i += 2 { + out = fmt.Sprintf("%s %v=%v", out, keyvals[i], keyvals[i+1]) + } + + return out +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index cee6b242..ee7a86d9 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -18,9 +18,7 @@ import ( "strings" "unicode" - "github.com/spf13/afero" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" ) // ConfigParseError denotes failing to parse configuration file. @@ -88,26 +86,14 @@ func insensitiviseMap(m map[string]interface{}) { } } -func absPathify(inPath string) string { - jww.INFO.Println("Trying to resolve absolute path to", inPath) +func absPathify(logger Logger, inPath string) string { + logger.Info("trying to resolve absolute path", "path", inPath) if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { inPath = userHomeDir() + inPath[5:] } - if strings.HasPrefix(inPath, "$") { - end := strings.Index(inPath, string(os.PathSeparator)) - - var value, suffix string - if end == -1 { - value = os.Getenv(inPath[1:]) - } else { - value = os.Getenv(inPath[1:end]) - suffix = inPath[end:] - } - - inPath = value + suffix - } + inPath = os.ExpandEnv(inPath) if filepath.IsAbs(inPath) { return filepath.Clean(inPath) @@ -118,21 +104,9 @@ func absPathify(inPath string) string { return filepath.Clean(p) } - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) - return "" -} + logger.Error(fmt.Errorf("could not discover absolute path: %w", err).Error()) -// Check if file Exists -func exists(fs afero.Fs, path string) (bool, error) { - stat, err := fs.Stat(path) - if err == nil { - return !stat.IsDir(), nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err + return "" } func stringInSlice(a string, list []string) bool { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 60f9868b..4a993589 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -22,7 +22,6 @@ package viper import ( "bytes" "encoding/csv" - "encoding/json" "errors" "fmt" "io" @@ -36,18 +35,19 @@ import ( "time" "github.com/fsnotify/fsnotify" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" "github.com/magiconair/properties" "github.com/mitchellh/mapstructure" - "github.com/pelletier/go-toml" "github.com/spf13/afero" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" "github.com/spf13/pflag" "github.com/subosito/gotenv" "gopkg.in/ini.v1" - "gopkg.in/yaml.v2" + + "github.com/spf13/viper/internal/encoding" + 
"github.com/spf13/viper/internal/encoding/hcl" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" ) // ConfigMarshalError happens when failing to marshal the configuration. @@ -67,8 +67,47 @@ type RemoteResponse struct { Error error } +var ( + encoderRegistry = encoding.NewEncoderRegistry() + decoderRegistry = encoding.NewDecoderRegistry() +) + func init() { v = New() + + { + codec := yaml.Codec{} + + encoderRegistry.RegisterEncoder("yaml", codec) + decoderRegistry.RegisterDecoder("yaml", codec) + + encoderRegistry.RegisterEncoder("yml", codec) + decoderRegistry.RegisterDecoder("yml", codec) + } + + { + codec := json.Codec{} + + encoderRegistry.RegisterEncoder("json", codec) + decoderRegistry.RegisterDecoder("json", codec) + } + + { + codec := toml.Codec{} + + encoderRegistry.RegisterEncoder("toml", codec) + decoderRegistry.RegisterDecoder("toml", codec) + } + + { + codec := hcl.Codec{} + + encoderRegistry.RegisterEncoder("hcl", codec) + decoderRegistry.RegisterDecoder("hcl", codec) + + encoderRegistry.RegisterEncoder("tfvars", codec) + decoderRegistry.RegisterDecoder("tfvars", codec) + } } type remoteConfigFactory interface { @@ -220,6 +259,8 @@ type Viper struct { properties *properties.Properties onConfigChange func(fsnotify.Event) + + logger Logger } // New returns an initialized Viper instance. @@ -227,7 +268,7 @@ func New() *Viper { v := new(Viper) v.keyDelim = "." v.configName = "config" - v.configPermissions = os.FileMode(0644) + v.configPermissions = os.FileMode(0o644) v.fs = afero.NewOsFs() v.config = make(map[string]interface{}) v.override = make(map[string]interface{}) @@ -237,6 +278,7 @@ func New() *Viper { v.env = make(map[string][]string) v.aliases = make(map[string]string) v.typeByDefValue = false + v.logger = jwwLogger{} return v } @@ -292,7 +334,7 @@ func NewWithOptions(opts ...Option) *Viper { // can use it in their testing as well. func Reset() { v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} } @@ -331,7 +373,7 @@ type RemoteProvider interface { } // SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} // SupportedRemoteProviders are universally supported remote providers. 
var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} @@ -477,8 +519,9 @@ func AddConfigPath(in string) { v.AddConfigPath(in) } func (v *Viper) AddConfigPath(in string) { if in != "" { - absin := absPathify(in) - jww.INFO.Println("adding", absin, "to paths to search") + absin := absPathify(v.logger, in) + + v.logger.Info("adding path to search paths", "path", absin) if !stringInSlice(absin, v.configPaths) { v.configPaths = append(v.configPaths, absin) } @@ -502,7 +545,8 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -534,7 +578,8 @@ func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -1153,7 +1198,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1232,7 +1277,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1350,14 +1395,15 @@ func (v *Viper) registerAlias(alias string, key string) { v.aliases[alias] = key } } else { - jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) + v.logger.Warn("creating circular reference alias", "alias", alias, "key", key, "real_key", v.realKey(key)) } } func (v *Viper) realKey(key string) string { newkey, exists := v.aliases[key] if exists { - jww.DEBUG.Println("Alias", key, "to", newkey) + v.logger.Debug("key is an alias", "alias", key, "to", newkey) + return v.realKey(newkey) } return key @@ -1367,11 +1413,13 @@ func (v *Viper) realKey(key string) string { func InConfig(key string) bool { return v.InConfig(key) } func (v *Viper) InConfig(key string) bool { + lcaseKey := strings.ToLower(key) + // if the requested key is an alias, then return the proper key - key = v.realKey(key) + lcaseKey = v.realKey(lcaseKey) + path := strings.Split(lcaseKey, v.keyDelim) - _, exists := v.config[key] - return exists + return v.searchIndexableWithPathPrefixes(v.config, path) != nil } // SetDefault sets the default value for this key. 
@@ -1416,7 +1464,7 @@ func (v *Viper) Set(key string, value interface{}) { func ReadInConfig() error { return v.ReadInConfig() } func (v *Viper) ReadInConfig() error { - jww.INFO.Println("Attempting to read in config file") + v.logger.Info("attempting to read in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1426,7 +1474,7 @@ func (v *Viper) ReadInConfig() error { return UnsupportedConfigError(v.getConfigType()) } - jww.DEBUG.Println("Reading file: ", filename) + v.logger.Debug("reading file", "file", filename) file, err := afero.ReadFile(v.fs, filename) if err != nil { return err @@ -1447,7 +1495,7 @@ func (v *Viper) ReadInConfig() error { func MergeInConfig() error { return v.MergeInConfig() } func (v *Viper) MergeInConfig() error { - jww.INFO.Println("Attempting to merge in config file") + v.logger.Info("attempting to merge in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1538,11 +1586,12 @@ func (v *Viper) SafeWriteConfigAs(filename string) error { } func (v *Viper) writeConfig(filename string, force bool) error { - jww.INFO.Println("Attempting to write configuration to file.") + v.logger.Info("attempting to write configuration to file") + var configType string ext := filepath.Ext(filename) - if ext != "" { + if ext != "" && ext != filepath.Base(filename) { configType = ext[1:] } else { configType = v.configType @@ -1584,35 +1633,12 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch strings.ToLower(v.getConfigType()) { - case "yaml", "yml": - if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "json": - if err := json.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "hcl": - obj, err := hcl.Parse(buf.String()) - if err != nil { - return ConfigParseError{err} - } - if err = hcl.DecodeObject(&c, obj); err != nil { - return ConfigParseError{err} - } - - case "toml": - tree, err := toml.LoadReader(buf) + switch format := strings.ToLower(v.getConfigType()); format { + case "yaml", "yml", "json", "toml", "hcl", "tfvars": + err := decoderRegistry.Decode(format, buf.Bytes(), &c) if err != nil { return ConfigParseError{err} } - tmap := tree.ToMap() - for k, v := range tmap { - c[k] = v - } case "dotenv", "env": env, err := gotenv.StrictParse(buf) @@ -1665,26 +1691,13 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { func (v *Viper) marshalWriter(f afero.File, configType string) error { c := v.AllSettings() switch configType { - case "json": - b, err := json.MarshalIndent(c, "", " ") - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) + case "yaml", "yml", "json", "toml", "hcl", "tfvars": + b, err := encoderRegistry.Encode(configType, c) if err != nil { return ConfigMarshalError{err} } - case "hcl": - b, err := json.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - ast, err := hcl.Parse(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - err = printer.Fprint(f, ast.Node) + _, err = f.WriteString(string(b)) if err != nil { return ConfigMarshalError{err} } @@ -1717,25 +1730,6 @@ func (v *Viper) marshalWriter(f afero.File, configType string) error { return ConfigMarshalError{err} } - case "toml": - t, err := toml.TreeFromMap(c) - if err != nil { - return ConfigMarshalError{err} - } - s := t.String() - if _, err := f.WriteString(s); err != nil { - return 
ConfigMarshalError{err} - } - - case "yaml", "yml": - b, err := yaml.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - if _, err = f.WriteString(string(b)); err != nil { - return ConfigMarshalError{err} - } - case "ini": keys := v.AllKeys() cfg := ini.Empty() @@ -1809,7 +1803,7 @@ func mergeMaps( for sk, sv := range src { tk := keyExists(sk, tgt) if tk == "" { - jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) + v.logger.Trace("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1819,7 +1813,7 @@ func mergeMaps( tv, ok := tgt[tk] if !ok { - jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) + v.logger.Trace("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1830,27 +1824,38 @@ func mergeMaps( svType := reflect.TypeOf(sv) tvType := reflect.TypeOf(tv) if tvType != nil && svType != tvType { // Allow for the target to be nil - jww.ERROR.Printf( - "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) + v.logger.Error( + "svType != tvType", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) continue } - jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) + v.logger.Trace( + "processing", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) switch ttv := tv.(type) { case map[interface{}]interface{}: - jww.TRACE.Printf("merging maps (must convert)") + v.logger.Trace("merging maps (must convert)") tsv := sv.(map[interface{}]interface{}) ssv := castToMapStringInterface(tsv) stv := castToMapStringInterface(ttv) mergeMaps(ssv, stv, ttv) case map[string]interface{}: - jww.TRACE.Printf("merging maps") + v.logger.Trace("merging maps") mergeMaps(sv.(map[string]interface{}), ttv, nil) default: - jww.TRACE.Printf("setting value") + v.logger.Trace("setting value") tgt[tk] = sv if itgt != nil { itgt[tk] = sv @@ -1885,7 +1890,7 @@ func (v *Viper) getKeyValueConfig() error { for _, rp := range v.remoteProviders { val, err := v.getRemoteConfig(rp) if err != nil { - jww.ERROR.Printf("get remote config: %s", err) + v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) continue } @@ -2121,39 +2126,6 @@ func (v *Viper) getConfigFile() (string, error) { return v.configFile, nil } -func (v *Viper) searchInPath(in string) (filename string) { - jww.DEBUG.Println("Searching for config in ", in) - for _, ext := range SupportedExts { - jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { - jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) - return filepath.Join(in, v.configName+"."+ext) - } - } - - if v.configType != "" { - if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { - return filepath.Join(in, v.configName) - } - } - - return "" -} - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { - jww.INFO.Println("Searching for config in ", v.configPaths) - - for _, cp := range v.configPaths { - file := v.searchInPath(cp) - if file != "" { - return file, nil - } - } - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} -} - // Debug prints all configuration registries for debugging // purposes. 
func Debug() { v.Debug() } diff --git a/vendor/github.com/spf13/viper/viper_go1_15.go b/vendor/github.com/spf13/viper/viper_go1_15.go new file mode 100644 index 00000000..19a771cb --- /dev/null +++ b/vendor/github.com/spf13/viper/viper_go1_15.go @@ -0,0 +1,57 @@ +//go:build !go1.16 || !finder +// +build !go1.16 !finder + +package viper + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/afero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + v.logger.Info("searching for config in paths", "paths", v.configPaths) + + for _, cp := range v.configPaths { + file := v.searchInPath(cp) + if file != "" { + return file, nil + } + } + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} +} + +func (v *Viper) searchInPath(in string) (filename string) { + v.logger.Debug("searching for config in path", "path", in) + for _, ext := range SupportedExts { + v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext)) + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { + v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext)) + return filepath.Join(in, v.configName+"."+ext) + } + } + + if v.configType != "" { + if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { + return filepath.Join(in, v.configName) + } + } + + return "" +} + +// Check if file Exists +func exists(fs afero.Fs, path string) (bool, error) { + stat, err := fs.Stat(path) + if err == nil { + return !stat.IsDir(), nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/spf13/viper/viper_go1_16.go b/vendor/github.com/spf13/viper/viper_go1_16.go new file mode 100644 index 00000000..e10172fa --- /dev/null +++ b/vendor/github.com/spf13/viper/viper_go1_16.go @@ -0,0 +1,32 @@ +//go:build go1.16 && finder +// +build go1.16,finder + +package viper + +import ( + "fmt" + + "github.com/spf13/afero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). 
+func (v *Viper) findConfigFile() (string, error) { + finder := finder{ + paths: v.configPaths, + fileNames: []string{v.configName}, + extensions: SupportedExts, + withoutExtension: v.configType != "", + } + + file, err := finder.Find(afero.NewIOFS(v.fs)) + if err != nil { + return "", err + } + + if file == "" { + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} + } + + return file, nil +} diff --git a/vendor/github.com/spf13/viper/watch.go b/vendor/github.com/spf13/viper/watch.go index c433a8fa..b5523b8f 100644 --- a/vendor/github.com/spf13/viper/watch.go +++ b/vendor/github.com/spf13/viper/watch.go @@ -1,3 +1,4 @@ +//go:build !js // +build !js package viper diff --git a/vendor/github.com/steinfletcher/apitest/go.mod b/vendor/github.com/steinfletcher/apitest/go.mod deleted file mode 100644 index f8085b4d..00000000 --- a/vendor/github.com/steinfletcher/apitest/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/steinfletcher/apitest - -require github.com/davecgh/go-spew v1.1.1 - -go 1.13 diff --git a/vendor/github.com/steinfletcher/apitest/go.sum b/vendor/github.com/steinfletcher/apitest/go.sum deleted file mode 100644 index b5e2922e..00000000 --- a/vendor/github.com/steinfletcher/apitest/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/vendor/github.com/swaggo/gin-swagger/.goreleaser.yml b/vendor/github.com/swaggo/gin-swagger/.goreleaser.yml new file mode 100644 index 00000000..3a538866 --- /dev/null +++ b/vendor/github.com/swaggo/gin-swagger/.goreleaser.yml @@ -0,0 +1,10 @@ +builds: + - skip: true +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/swaggo/gin-swagger/.travis.yml b/vendor/github.com/swaggo/gin-swagger/.travis.yml deleted file mode 100644 index 7840f05b..00000000 --- a/vendor/github.com/swaggo/gin-swagger/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.13.x - - 1.14.x - -matrix: - fast_finish: true - -script: - - go test -coverprofile=coverage.txt -covermode=atomic - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/swaggo/gin-swagger/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/swaggo/gin-swagger/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..fd84126d --- /dev/null +++ b/vendor/github.com/swaggo/gin-swagger/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,8 @@ +**Describe the PR** +e.g. add cool parser. + +**Relation issue** +e.g. https://github.com/swaggo/gin-swagger/pull/123/files + +**Additional context** +Add any other context about the problem here. diff --git a/vendor/github.com/swaggo/gin-swagger/README.md b/vendor/github.com/swaggo/gin-swagger/README.md index 14a5fb91..cab81437 100644 --- a/vendor/github.com/swaggo/gin-swagger/README.md +++ b/vendor/github.com/swaggo/gin-swagger/README.md @@ -1,32 +1,40 @@ # gin-swagger -gin middleware to automatically generate RESTful API documentation with Swagger 2.0. +gin middleware to automatically generate RESTFUL API documentation with Swagger 2.0. 
-[![Travis branch](https://img.shields.io/travis/swaggo/gin-swagger/master.svg)](https://travis-ci.org/swaggo/gin-swagger) +[![Build Status](https://github.com/swaggo/gin-swagger/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions) [![Codecov branch](https://img.shields.io/codecov/c/github/swaggo/gin-swagger/master.svg)](https://codecov.io/gh/swaggo/gin-swagger) [![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/gin-swagger)](https://goreportcard.com/report/github.com/swaggo/gin-swagger) [![GoDoc](https://godoc.org/github.com/swaggo/gin-swagger?status.svg)](https://godoc.org/github.com/swaggo/gin-swagger) - +[![Release](https://img.shields.io/github/release/swaggo/gin-swagger.svg?style=flat-square)](https://github.com/swaggo/gin-swagger/releases) ## Usage ### Start using it + 1. Add comments to your API source code, [See Declarative Comments Format](https://swaggo.github.io/swaggo.io/declarative_comments_format/). 2. Download [Swag](https://github.com/swaggo/swag) for Go by using: + ```sh -$ go get -u github.com/swaggo/swag/cmd/swag +go get -u github.com/swaggo/swag/cmd/swag ``` -3. Run the [Swag](https://github.com/swaggo/swag) in your Go project root folder which contains `main.go` file, [Swag](https://github.com/swaggo/swag) will parse comments and generate required files(`docs` folder and `docs/doc.go`). +3. Run the [Swag](https://github.com/swaggo/swag) at your Go project root path(for instance `~/root/go-peoject-name`), + [Swag](https://github.com/swaggo/swag) will parse comments and generate required files(`docs` folder and `docs/doc.go`) + at `~/root/go-peoject-name/docs`. + ```sh -$ swag init +swag init ``` + 4. Download [gin-swagger](https://github.com/swaggo/gin-swagger) by using: + ```sh -$ go get -u github.com/swaggo/gin-swagger -$ go get -u github.com/swaggo/files +go get -u github.com/swaggo/gin-swagger +go get -u github.com/swaggo/files ``` -And import following in your code: + +Import following in your code: ```go import "github.com/swaggo/gin-swagger" // gin-swagger middleware @@ -36,83 +44,125 @@ import "github.com/swaggo/files" // swagger embed files ### Canonical example: -```go -package main +Now assume you have implemented a simple api as following: -import ( - "github.com/gin-gonic/gin" - swaggerFiles "github.com/swaggo/files" - ginSwagger "github.com/swaggo/gin-swagger" - - _ "github.com/swaggo/gin-swagger/example/basic/docs" // docs is generated by Swag CLI, you have to import it. -) - -// @title Swagger Example API -// @version 1.0 -// @description This is a sample server Petstore server. -// @termsOfService http://swagger.io/terms/ - -// @contact.name API Support -// @contact.url http://www.swagger.io/support -// @contact.email support@swagger.io +```go +// A get function which returns a hello world string by json +func Helloworld(g *gin.Context) { + g.JSON(http.StatusOK,"helloworld") +} -// @license.name Apache 2.0 -// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +``` -// @host petstore.swagger.io -// @BasePath /v2 -func main() { - r := gin.New() +So how to use gin-swagger on api above? Just follow the following guide. - url := ginSwagger.URL("http://localhost:8080/swagger/doc.json") // The url pointing to API definition - r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler, url)) +1. 
Add Comments for apis and main function with gin-swagger rules like following: - r.Run() +```go +// @BasePath /api/v1 + +// PingExample godoc +// @Summary ping example +// @Schemes +// @Description do ping +// @Tags example +// @Accept json +// @Produce json +// @Success 200 {string} Helloworld +// @Router /example/helloworld [get] +func Helloworld(g *gin.Context) { + g.JSON(http.StatusOK,"helloworld") } ``` -5. Run it, and browse to http://localhost:8080/swagger/index.html, you can see Swagger 2.0 Api documents. +2. Use `swag init` command to generate a docs, docs generated will be stored at +3. import the docs like this: + I assume your project named `github.com/go-project-name/docs`. -![swagger_index.html](https://user-images.githubusercontent.com/8943871/60704329-b7ab0680-9f36-11e9-9184-5c638c05e9c5.png) +```go +import ( + docs "github.com/go-project-name/docs" +) +``` -6. If you want to disable swagger when some environment variable is set, use `DisablingWrapHandler` +4. build your application and after that, go to http://localhost:8080/swagger/index.html ,you to see your Swagger UI. -### Example with disabling: +5. The full code and folder relatives here: ```go package main import ( - "github.com/gin-gonic/gin" - swaggerFiles "github.com/swaggo/files" - ginSwagger "github.com/swaggo/gin-swagger" - - _ "github.com/swaggo/gin-swagger/example/basic/docs" // docs is generated by Swag CLI, you have to import it. + "github.com/gin-gonic/gin" + docs "github.com/go-project-name/docs" + swaggerfiles "github.com/swaggo/files" + ginSwagger "github.com/swaggo/gin-swagger" + "net/http" ) +// @BasePath /api/v1 + +// PingExample godoc +// @Summary ping example +// @Schemes +// @Description do ping +// @Tags example +// @Accept json +// @Produce json +// @Success 200 {string} Helloworld +// @Router /example/helloworld [get] +func Helloworld(g *gin.Context) { + g.JSON(http.StatusOK,"helloworld") +} -// @title Swagger Example API -// @version 1.0 -// @description This is a sample server Petstore server. -// @termsOfService http://swagger.io/terms/ +func main() { + r := gin.Default() + docs.SwaggerInfo.BasePath = "/api/v1" + v1 := r.Group("/api/v1") + { + eg := v1.Group("/example") + { + eg.GET("/helloworld",Helloworld) + } + } + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler)) + r.Run(":8080") -// @contact.name API Support -// @contact.url http://www.swagger.io/support -// @contact.email support@swagger.io +} +``` + +Demo project tree, `swag init` is run at relative `.` + +``` +. +├── docs +│   ├── docs.go +│   ├── swagger.json +│   └── swagger.yaml +├── go.mod +├── go.sum +└── main.go +``` + +## Configuration -// @license.name Apache 2.0 -// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +You can configure Swagger using different configuration options -// @host petstore.swagger.io -// @BasePath /v2 +```go func main() { r := gin.New() - - // use ginSwagger middleware to - r.GET("/swagger/*any", ginSwagger.DisablingWrapHandler(swaggerFiles.Handler, "NAME_OF_ENV_VARIABLE")) + + ginSwagger.WrapHandler(swaggerFiles.Handler, + ginSwagger.URL("http://localhost:8080/swagger/doc.json"), + ginSwagger.DefaultModelsExpandDepth(-1)) r.Run() } ``` -Then, if you set environment variable `NAME_OF_ENV_VARIABLE` to anything, `/swagger/*any` -will respond 404, just like when route unspecified. 
+| Option | Type | Default | Description | +| ------------------------ | ------ | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| URL | string | "doc.json" | URL pointing to API definition | +| DocExpantion | string | "list" | Controls the default expansion setting for the operations and tags. It can be 'list' (expands only the tags), 'full' (expands the tags and operations) or 'none' (expands nothing). | +| DeepLinking | bool | true | If set to true, enables deep linking for tags and operations. See the Deep Linking documentation for more information. | +| DefaultModelsExpandDepth | int | 1 | Default expansion depth for models (set to -1 completely hide the models). | +| InstanceName | string | "swagger" | The instance name of the swagger document. If multiple different swagger instances should be deployed on one gin router, ensure that each instance has a unique name (use the _--instanceName_ parameter to generate swagger documents with _swag init_). | diff --git a/vendor/github.com/swaggo/gin-swagger/go.mod b/vendor/github.com/swaggo/gin-swagger/go.mod deleted file mode 100644 index 74aafa21..00000000 --- a/vendor/github.com/swaggo/gin-swagger/go.mod +++ /dev/null @@ -1,21 +0,0 @@ -module github.com/swaggo/gin-swagger - -require ( - github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 - github.com/gin-contrib/gzip v0.0.1 - github.com/gin-contrib/sse v0.1.0 // indirect - github.com/gin-gonic/gin v1.4.0 - github.com/kr/pretty v0.1.0 // indirect - github.com/mattn/go-isatty v0.0.8 // indirect - github.com/stretchr/testify v1.3.0 - github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14 - github.com/swaggo/swag v1.5.1 - github.com/ugorji/go v1.1.13 // indirect - golang.org/x/net v0.0.0-20190611141213-3f473d35a33a - golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae // indirect - golang.org/x/text v0.3.2 // indirect - golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect -) - -go 1.13 diff --git a/vendor/github.com/swaggo/gin-swagger/go.sum b/vendor/github.com/swaggo/gin-swagger/go.sum deleted file mode 100644 index 3fef3c9e..00000000 --- a/vendor/github.com/swaggo/gin-swagger/go.sum +++ /dev/null @@ -1,100 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/gzip v0.0.1 h1:ezvKOL6jH+jlzdHNE4h9h8q8uMpDQjyl0NN0Jd7jozc= -github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= -github.com/gin-contrib/sse 
v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= -github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= -github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ= -github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= -github.com/go-openapi/jsonpointer v0.17.0 h1:nH6xp8XdXHx8dqveo0ZuJBluCO2qGrPbDNZ0dwoRHP0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= -github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/spec v0.19.0 h1:A4SZ6IWh3lnjH0rG0Z5lkxazMGBECtrZcbyYQi+64k4= -github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi880= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= -github.com/swaggo/swag v1.5.1 h1:2Agm8I4K5qb00620mHq0VJ05/KT4FtmALPIcQR9lEZM= -github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.5-pre h1:jyJKFOSEbdOc2HODrf2qcCkYOdq7zzXqA9bhW5oV4fM= -github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0= -github.com/ugorji/go v1.1.13 h1:nB3O5kBSQGjEQAcfe1aLUYuxmXdFKmYgBZhY32rQb6Q= -github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= -github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.5-pre h1:5YV9PsFAN+ndcCtTM7s60no7nY7eTG3LPtxhSwuxzCs= -github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= -github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a h1:+KkCgOMgnKSgenxTBoiwkMqTiouMIy/3o8RLdmSbGoY= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae h1:xiXzMMEQdQcric9hXtr1QU98MHunKK7OTtsoU6bYWs4= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b h1:/mJ+GKieZA6hFDQGdWZrjj4AXPl5ylY+5HusG80roy0= -golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= -gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/swaggo/gin-swagger/swagger.go b/vendor/github.com/swaggo/gin-swagger/swagger.go index 2637e7c6..0d15d895 100644 --- a/vendor/github.com/swaggo/gin-swagger/swagger.go +++ b/vendor/github.com/swaggo/gin-swagger/swagger.go @@ -2,9 +2,11 @@ package ginSwagger import ( "html/template" + "net/http" "os" + "path/filepath" "regexp" - "strings" + "sync" "golang.org/x/net/webdav" @@ -12,11 +14,37 @@ import ( "github.com/swaggo/swag" ) +type swaggerConfig struct { + URL string + DeepLinking bool + DocExpansion string + DefaultModelsExpandDepth int + Oauth2RedirectURL template.JS +} + // Config stores ginSwagger configuration variables. type Config struct { //The url pointing to API definition (normally swagger.json or swagger.yaml). Default is `doc.json`. - URL string - DeepLinking bool + URL string + DeepLinking bool + DocExpansion string + DefaultModelsExpandDepth int + InstanceName string +} + +// Convert the config to a swagger one in order to fill unexposed template values. +func (c Config) ToSwaggerConfig() swaggerConfig { + return swaggerConfig{ + URL: c.URL, + DeepLinking: c.DeepLinking, + DocExpansion: c.DocExpansion, + DefaultModelsExpandDepth: c.DefaultModelsExpandDepth, + Oauth2RedirectURL: template.JS( + "`${window.location.protocol}//${window.location.host}$" + + "{window.location.pathname.split('/').slice(0, window.location.pathname.split('/').length - 1).join('/')}" + + "/oauth2-redirect.html`", + ), + } } // URL presents the url pointing to API definition (normally swagger.json or swagger.yaml). @@ -26,6 +54,13 @@ func URL(url string) func(c *Config) { } } +// DocExpansion list, full, none. +func DocExpansion(docExpansion string) func(c *Config) { + return func(c *Config) { + c.DocExpansion = docExpansion + } +} + // DeepLinking set the swagger deeplinking configuration func DeepLinking(deepLinking bool) func(c *Config) { return func(c *Config) { @@ -33,11 +68,30 @@ func DeepLinking(deepLinking bool) func(c *Config) { } } +// DefaultModelsExpandDepth set the default expansion depth for models +// (set to -1 completely hide the models). 
+func DefaultModelsExpandDepth(depth int) func(c *Config) { + return func(c *Config) { + c.DefaultModelsExpandDepth = depth + } +} + +// InstanceName set the instance name that was used to generate the swagger documents. +// Defaults to swag.Name ("swagger"). +func InstanceName(name string) func(c *Config) { + return func(c *Config) { + c.InstanceName = name + } +} + // WrapHandler wraps `http.Handler` into `gin.HandlerFunc`. func WrapHandler(h *webdav.Handler, confs ...func(c *Config)) gin.HandlerFunc { defaultConfig := &Config{ - URL: "doc.json", - DeepLinking: true, + URL: "doc.json", + DeepLinking: true, + DocExpansion: "list", + DefaultModelsExpandDepth: 1, + InstanceName: swag.Name, } for _, c := range confs { @@ -48,55 +102,59 @@ func WrapHandler(h *webdav.Handler, confs ...func(c *Config)) gin.HandlerFunc { } // CustomWrapHandler wraps `http.Handler` into `gin.HandlerFunc` -func CustomWrapHandler(config *Config, h *webdav.Handler) gin.HandlerFunc { - //create a template with name +func CustomWrapHandler(config *Config, handler *webdav.Handler) gin.HandlerFunc { + var once sync.Once + + if config.InstanceName == "" { + config.InstanceName = swag.Name + } + + // create a template with name t := template.New("swagger_index.html") index, _ := t.Parse(swagger_index_templ) var rexp = regexp.MustCompile(`(.*)(index\.html|doc\.json|favicon-16x16\.png|favicon-32x32\.png|/oauth2-redirect\.html|swagger-ui\.css|swagger-ui\.css\.map|swagger-ui\.js|swagger-ui\.js\.map|swagger-ui-bundle\.js|swagger-ui-bundle\.js\.map|swagger-ui-standalone-preset\.js|swagger-ui-standalone-preset\.js\.map)[\?|.]*`) return func(c *gin.Context) { + matches := rexp.FindStringSubmatch(c.Request.RequestURI) - type swaggerUIBundle struct { - URL string - DeepLinking bool - } - - var matches []string - if matches = rexp.FindStringSubmatch(c.Request.RequestURI); len(matches) != 3 { - c.Status(404) - c.Writer.Write([]byte("404 page not found")) + if len(matches) != 3 { + c.Status(http.StatusNotFound) + _, _ = c.Writer.Write([]byte("404 page not found")) return } + path := matches[2] - prefix := matches[1] - h.Prefix = prefix + once.Do(func() { + handler.Prefix = matches[1] + }) - if strings.HasSuffix(path, ".html") { + switch filepath.Ext(path) { + case ".html": c.Header("Content-Type", "text/html; charset=utf-8") - } else if strings.HasSuffix(path, ".css") { + case ".css": c.Header("Content-Type", "text/css; charset=utf-8") - } else if strings.HasSuffix(path, ".js") { + case ".js": c.Header("Content-Type", "application/javascript") - } else if strings.HasSuffix(path, ".json") { - c.Header("Content-Type", "application/json") + case ".png": + c.Header("Content-Type", "image/png") + case ".json": + c.Header("Content-Type", "application/json; charset=utf-8") } switch path { case "index.html": - index.Execute(c.Writer, &swaggerUIBundle{ - URL: config.URL, - DeepLinking: config.DeepLinking, - }) + _ = index.Execute(c.Writer, config.ToSwaggerConfig()) case "doc.json": - doc, err := swag.ReadDoc() + doc, err := swag.ReadDoc(config.InstanceName) if err != nil { - panic(err) + c.AbortWithStatus(http.StatusInternalServerError) + + return } - c.Writer.Write([]byte(doc)) - return + _, _ = c.Writer.Write([]byte(doc)) default: - h.ServeHTTP(c.Writer, c.Request) + handler.ServeHTTP(c.Writer, c.Request) } } } @@ -109,7 +167,7 @@ func DisablingWrapHandler(h *webdav.Handler, envName string) gin.HandlerFunc { return func(c *gin.Context) { // Simulate behavior when route unspecified and // return 404 HTTP code - c.String(404, "") + 
c.String(http.StatusNotFound, "") } } @@ -124,7 +182,7 @@ func DisablingCustomWrapHandler(config *Config, h *webdav.Handler, envName strin return func(c *gin.Context) { // Simulate behavior when route unspecified and // return 404 HTTP code - c.String(404, "") + c.String(http.StatusNotFound, "") } } @@ -209,6 +267,7 @@ window.onload = function() { url: "{{.URL}}", dom_id: '#swagger-ui', validatorUrl: null, + oauth2RedirectUrl: {{.Oauth2RedirectURL}}, presets: [ SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset @@ -217,7 +276,9 @@ window.onload = function() { SwaggerUIBundle.plugins.DownloadUrl ], layout: "StandaloneLayout", - deepLinking: {{.DeepLinking}} + docExpansion: "{{.DocExpansion}}", + deepLinking: {{.DeepLinking}}, + defaultModelsExpandDepth: {{.DefaultModelsExpandDepth}} }) window.ui = ui diff --git a/vendor/github.com/swaggo/swag/.gitignore b/vendor/github.com/swaggo/swag/.gitignore index bea8db68..6ba1ec22 100644 --- a/vendor/github.com/swaggo/swag/.gitignore +++ b/vendor/github.com/swaggo/swag/.gitignore @@ -1,5 +1,7 @@ dist testdata/simple*/docs +testdata/quotes/docs +testdata/quotes/quotes.so example/basic/docs/* example/celler/docs/* cover.out @@ -16,5 +18,5 @@ cover.out # Etc .DS_Store -swag -swag.exe +/swag +/swag.exe diff --git a/vendor/github.com/swaggo/swag/.goreleaser.yml b/vendor/github.com/swaggo/swag/.goreleaser.yml index d0b576ab..6ce3665b 100644 --- a/vendor/github.com/swaggo/swag/.goreleaser.yml +++ b/vendor/github.com/swaggo/swag/.goreleaser.yml @@ -1,12 +1,24 @@ build: main: cmd/swag/main.go -archive: - replacements: - darwin: Darwin - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + - 386 + ignore: + - goos: darwin + goarch: arm64 +archives: + - + replacements: + darwin: Darwin + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + arm64: aarch64 checksum: name_template: 'checksums.txt' snapshot: diff --git a/vendor/github.com/swaggo/swag/.travis.yml b/vendor/github.com/swaggo/swag/.travis.yml deleted file mode 100644 index 36a51fd7..00000000 --- a/vendor/github.com/swaggo/swag/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.13.x - - 1.14.x - -install: - - make deps - -script: - - make fmt-check - - make lint - - make vet - - make build - - make test - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/swaggo/swag/Makefile b/vendor/github.com/swaggo/swag/Makefile index f7829961..fa679d87 100644 --- a/vendor/github.com/swaggo/swag/Makefile +++ b/vendor/github.com/swaggo/swag/Makefile @@ -56,12 +56,10 @@ clean: deps: $(GOGET) github.com/swaggo/cli $(GOGET) github.com/ghodss/yaml - $(GOGET) github.com/gin-gonic/gin $(GOGET) github.com/KyleBanks/depth $(GOGET) github.com/go-openapi/jsonreference $(GOGET) github.com/go-openapi/spec $(GOGET) github.com/stretchr/testify/assert - $(GOGET) github.com/alecthomas/template $(GOGET) golang.org/x/tools/go/loader .PHONY: devel-deps diff --git a/vendor/github.com/swaggo/swag/README.md b/vendor/github.com/swaggo/swag/README.md index 5edbd5d5..cad48eff 100644 --- a/vendor/github.com/swaggo/swag/README.md +++ b/vendor/github.com/swaggo/swag/README.md @@ -4,7 +4,7 @@ -[![Travis Status](https://img.shields.io/travis/swaggo/swag/master.svg)](https://travis-ci.org/swaggo/swag) +[![Build Status](https://github.com/swaggo/swag/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/features/actions) [![Coverage 
Status](https://img.shields.io/codecov/c/github/swaggo/swag/master.svg)](https://codecov.io/gh/swaggo/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/swaggo/swag)](https://goreportcard.com/report/github.com/swaggo/swag) [![codebeat badge](https://codebeat.co/badges/71e2f5e5-9e6b-405d-baf9-7cc8b5037330)](https://codebeat.co/projects/github-com-swaggo-swag-master) @@ -20,6 +20,7 @@ Swag converts Go annotations to Swagger Documentation 2.0. We've created a varie - [Getting started](#getting-started) - [Supported Web Frameworks](#supported-web-frameworks) - [How to use it with Gin](#how-to-use-it-with-gin) + - [The swag formatter](#The swag formatter) - [Implementation Status](#implementation-status) - [Declarative Comments Format](#declarative-comments-format) - [General API Info](#general-api-info) @@ -37,7 +38,8 @@ Swag converts Go annotations to Swagger Documentation 2.0. We've created a varie - [Use swaggerignore tag to exclude a field](#use-swaggerignore-tag-to-exclude-a-field) - [Add extension info to struct field](#add-extension-info-to-struct-field) - [Rename model to display](#rename-model-to-display) - - [How to using security annotations](#how-to-using-security-annotations) + - [How to use security annotations](#how-to-use-security-annotations) + - [Add a description for enum items](#add-a-description-for-enum-items) - [About the Project](#about-the-project) ## Getting started @@ -47,8 +49,11 @@ Swag converts Go annotations to Swagger Documentation 2.0. We've created a varie 2. Download swag by using: ```sh $ go get -u github.com/swaggo/swag/cmd/swag + +# 1.16 or newer +$ go install github.com/swaggo/swag/cmd/swag@latest ``` -To build from source you need [Go](https://golang.org/dl/) (1.9 or newer). +To build from source you need [Go](https://golang.org/dl/) (1.13 or newer). Or download a pre-compiled binary from the [release page](https://github.com/swaggo/swag/releases). @@ -62,6 +67,12 @@ $ swag init swag init -g http/api.go ``` +4. (optional) Use `swag fmt` format the SWAG comment. (Please upgrade to the latest version) + + ```sh + swag fmt + ``` + ## swag cli ```sh @@ -73,14 +84,36 @@ USAGE: swag init [command options] [arguments...] 
OPTIONS: - --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (default: "main.go") - --dir value, -d value Directory you want to parse (default: "./") - --exclude value Exclude directoies and files, comma separated - --propertyStrategy value, -p value Property Naming Strategy like snakecase,camelcase,pascalcase (default: "camelcase") - --output value, -o value Output directory for all the generated files(swagger.json, swagger.yaml and doc.go) (default: "./docs") - --parseVendor Parse go files in 'vendor' folder, disabled by default - --parseDependency Parse go files in outside dependency folder, disabled by default - --parseInternal Parse go files in internal packages, disabled by default + --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (default: "main.go") + --dir value, -d value Directory you want to parse (default: "./") + --exclude value Exclude directories and files when searching, comma separated + --propertyStrategy value, -p value Property Naming Strategy like snakecase,camelcase,pascalcase (default: "camelcase") + --output value, -o value Output directory for all the generated files(swagger.json, swagger.yaml and doc.go) (default: "./docs") + --parseVendor Parse go files in 'vendor' folder, disabled by default (default: false) + --parseDependency Parse go files in outside dependency folder, disabled by default (default: false) + --markdownFiles value, --md value Parse folder containing markdown files to use as description, disabled by default + --codeExampleFiles value, --cef value Parse folder containing code example files to use for the x-codeSamples extension, disabled by default + --parseInternal Parse go files in internal packages, disabled by default (default: false) + --generatedTime Generate timestamp at the top of docs.go, disabled by default (default: false) + --parseDepth value Dependency parse depth (default: 100) + --instanceName value Set the swagger document instance name (default: "swagger") + --help, -h show help (default: false) +``` + +```bash +swag fmt -h +NAME: + swag fmt - format swag comments + +USAGE: + swag fmt [command options] [arguments...] + +OPTIONS: + --dir value, -d value Directories you want to parse,comma separated and general-info file must be in the first one (default: "./") + --exclude value Exclude directories and files when searching, comma separated + --generalInfo value, -g value Go file path in which 'swagger general API Info' is written (default: "main.go") + --help, -h show help (default: false) + ``` ## Supported Web Frameworks @@ -91,6 +124,7 @@ OPTIONS: - [net/http](https://github.com/swaggo/http-swagger) - [flamingo](https://github.com/i-love-flamingo/swagger) - [fiber](https://github.com/arsmn/fiber-swagger) +- [atreugo](https://github.com/Nerzal/atreugo-swagger) ## How to use it with Gin @@ -105,51 +139,22 @@ import "github.com/swaggo/files" // swagger embed files 2. Add [General API](#general-api-info) annotations in `main.go` code: ```go -// @title Swagger Example API -// @version 1.0 -// @description This is a sample server celler server. -// @termsOfService http://swagger.io/terms/ - -// @contact.name API Support -// @contact.url http://www.swagger.io/support -// @contact.email support@swagger.io +// @title Swagger Example API +// @version 1.0 +// @description This is a sample server celler server. 
+// @termsOfService http://swagger.io/terms/ -// @license.name Apache 2.0 -// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io -// @host localhost:8080 -// @BasePath /api/v1 -// @query.collection.format multi +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html -// @securityDefinitions.basic BasicAuth - -// @securityDefinitions.apikey ApiKeyAuth -// @in header -// @name Authorization - -// @securitydefinitions.oauth2.application OAuth2Application -// @tokenUrl https://example.com/oauth/token -// @scope.write Grants write access -// @scope.admin Grants read and write access to administrative information - -// @securitydefinitions.oauth2.implicit OAuth2Implicit -// @authorizationurl https://example.com/oauth/authorize -// @scope.write Grants write access -// @scope.admin Grants read and write access to administrative information - -// @securitydefinitions.oauth2.password OAuth2Password -// @tokenUrl https://example.com/oauth/token -// @scope.read Grants read access -// @scope.write Grants write access -// @scope.admin Grants read and write access to administrative information - -// @securitydefinitions.oauth2.accessCode OAuth2AccessCode -// @tokenUrl https://example.com/oauth/token -// @authorizationurl https://example.com/oauth/authorize -// @scope.admin Grants read and write access to administrative information - -// @x-extension-openapi {"example": "value on a json format"} +// @host localhost:8080 +// @BasePath /api/v1 +// @securityDefinitions.basic BasicAuth func main() { r := gin.Default() @@ -174,7 +179,7 @@ func main() { //... ``` -Additionally some general API info can be set dynamically. The generated code package `docs` exports `SwaggerInfo` variable which we can use to set the title, description, version, host and base path programatically. Example using Gin: +Additionally some general API info can be set dynamically. The generated code package `docs` exports `SwaggerInfo` variable which we can use to set the title, description, version, host and base path programmatically. Example using Gin: ```go package main @@ -187,18 +192,15 @@ import ( "./docs" // docs is generated by Swag CLI, you have to import it. ) -// @contact.name API Support -// @contact.url http://www.swagger.io/support -// @contact.email support@swagger.io - -// @license.name Apache 2.0 -// @license.url http://www.apache.org/licenses/LICENSE-2.0.html - -// @termsOfService http://swagger.io/terms/ +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html func main() { - // programatically set swagger info + // programmatically set swagger info docs.SwaggerInfo.Title = "Swagger Example API" docs.SwaggerInfo.Description = "This is a sample server Petstore server." 
docs.SwaggerInfo.Version = "1.0" @@ -221,65 +223,63 @@ func main() { package controller import ( - "fmt" - "net/http" - "strconv" + "fmt" + "net/http" + "strconv" - "github.com/gin-gonic/gin" - "github.com/swaggo/swag/example/celler/httputil" - "github.com/swaggo/swag/example/celler/model" + "github.com/gin-gonic/gin" + "github.com/swaggo/swag/example/celler/httputil" + "github.com/swaggo/swag/example/celler/model" ) // ShowAccount godoc -// @Summary Show a account -// @Description get string by ID -// @ID get-string-by-int -// @Accept json -// @Produce json -// @Param id path int true "Account ID" -// @Success 200 {object} model.Account -// @Header 200 {string} Token "qwerty" -// @Failure 400 {object} httputil.HTTPError -// @Failure 404 {object} httputil.HTTPError -// @Failure 500 {object} httputil.HTTPError -// @Router /accounts/{id} [get] +// @Summary Show an account +// @Description get string by ID +// @Tags accounts +// @Accept json +// @Produce json +// @Param id path int true "Account ID" +// @Success 200 {object} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts/{id} [get] func (c *Controller) ShowAccount(ctx *gin.Context) { - id := ctx.Param("id") - aid, err := strconv.Atoi(id) - if err != nil { - httputil.NewError(ctx, http.StatusBadRequest, err) - return - } - account, err := model.AccountOne(aid) - if err != nil { - httputil.NewError(ctx, http.StatusNotFound, err) - return - } - ctx.JSON(http.StatusOK, account) + id := ctx.Param("id") + aid, err := strconv.Atoi(id) + if err != nil { + httputil.NewError(ctx, http.StatusBadRequest, err) + return + } + account, err := model.AccountOne(aid) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, account) } // ListAccounts godoc -// @Summary List accounts -// @Description get accounts -// @Accept json -// @Produce json -// @Param q query string false "name search by q" -// @Success 200 {array} model.Account -// @Header 200 {string} Token "qwerty" -// @Failure 400 {object} httputil.HTTPError -// @Failure 404 {object} httputil.HTTPError -// @Failure 500 {object} httputil.HTTPError -// @Router /accounts [get] +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] func (c *Controller) ListAccounts(ctx *gin.Context) { - q := ctx.Request.URL.Query().Get("q") - accounts, err := model.AccountsAll(q) - if err != nil { - httputil.NewError(ctx, http.StatusNotFound, err) - return - } - ctx.JSON(http.StatusOK, accounts) + q := ctx.Request.URL.Query().Get("q") + accounts, err := model.AccountsAll(q) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, accounts) } - //... ``` @@ -291,6 +291,21 @@ $ swag init ![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png) +## The swag formatter + +The Swag Comments can be automatically formatted, just like 'go fmt'. +Find the result of formatting [here](https://github.com/swaggo/swag/tree/master/example/celler). 
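For a quick illustration of what the formatter does, here is a rough before/after sketch of a handler's annotations. The annotation values are taken from the `ShowAccount` example above; the alignment is produced by a tabwriter internally, so the exact column widths below are only approximate, not the formatter's guaranteed output:

```go
// Before `swag fmt`: attributes and their arguments are separated by single spaces.
// @Summary Show an account
// @Param id path int true "Account ID"
// @Success 200 {object} model.Account
// @Router /accounts/{id} [get]

// After `swag fmt`: the same comments, with arguments aligned into columns
// (similar to what `go fmt` does for code).
// @Summary  Show an account
// @Param    id   path  int  true  "Account ID"
// @Success  200  {object}  model.Account
// @Router   /accounts/{id}  [get]
```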
+ +Usage: +```shell +swag fmt +``` + +Exclude folder: +```shell +swag fmt -d ./ --exclude ./internal +``` + ## Implementation Status [Swagger 2.0 document](https://swagger.io/docs/specification/2-0/basic-structure/) @@ -335,6 +350,8 @@ $ swag init | license.url | A URL to the license used for the API. MUST be in the format of a URL. | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html | | host | The host (name or ip) serving the API. | // @host localhost:8080 | | BasePath | The base path on which the API is served. | // @BasePath /api/v1 | +| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | // @accept json | +| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | // @produce json | | query.collection.format | The default collection(array) param format in query,enums:csv,multi,pipes,tsv,ssv. If not set, csv is the default.| // @query.collection.format multi | schemes | The transfer protocol for the operation that separated by spaces. | // @schemes http https | | x-name | The extension key, must be start by x- and take only json value | // @x-example-key {"key": "value"} | @@ -365,15 +382,18 @@ When a short string in your documentation is insufficient, or you need images, c | id | A unique string used to identify the operation. Must be unique among all API operations. | | tags | A list of tags to each API operation that separated by commas. | | summary | A short summary of what the operation does. | -| accept | A list of MIME types the APIs can consume. Value MUST be as described under [Mime Types](#mime-types). | +| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | | produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | | param | Parameters that separated by spaces. `param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | | security | [Security](#security) to each API operation. | -| success | Success response that separated by spaces. `return code`,`{param type}`,`data type`,`comment` | -| failure | Failure response that separated by spaces. `return code`,`{param type}`,`data type`,`comment` | +| success | Success response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | +| failure | Failure response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | +| response | As same as `success` and `failure` | | header | Header in response that separated by spaces. `return code`,`{param type}`,`data type`,`comment` | | router | Path definition that separated by spaces. `path`,`[httpMethod]` | | x-name | The extension key, must be start by x- and take only json value. | +| x-codeSample | Optional Markdown usage. take `file` as parameter. This will then search for a file named like the summary in the given folder. | +| deprecated | Mark endpoint as deprecated. 
| @@ -438,13 +458,14 @@ Besides that, `swag` also accepts aliases for some MIME Types as follows: ## Attribute ```go -// @Param enumstring query string false "string enums" Enums(A, B, C) -// @Param enumint query int false "int enums" Enums(1, 2, 3) -// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3) -// @Param string query string false "string valid" minlength(5) maxlength(10) -// @Param int query int false "int valid" mininum(1) maxinum(10) -// @Param default query string false "string default" default(A) -// @Param collection query []string false "string collection" collectionFormat(multi) +// @Param enumstring query string false "string enums" Enums(A, B, C) +// @Param enumint query int false "int enums" Enums(1, 2, 3) +// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3) +// @Param string query string false "string valid" minlength(5) maxlength(10) +// @Param int query int false "int valid" minimum(1) maximum(10) +// @Param default query string false "string default" default(A) +// @Param collection query []string false "string collection" collectionFormat(multi) +// @Param extensions query []string false "string collection" extensions(x-example=test,x-nullable) ``` It also works for the struct fields: @@ -465,17 +486,18 @@ Field Name | Type | Description default | * | Declares the value of the parameter that the server will use if none is provided, for example a "count" to control the number of results per page might default to 100 if not supplied by the client in the request. (Note: "default" has no meaning for required parameters.) See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. Unlike JSON Schema this value MUST conform to the defined [`type`](#parameterType) for this parameter. maximum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.2. minimum | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.3. +multipleOf | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1. maxLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.1. minLength | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.2. enums | [\*] | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1. format | `string` | The extending format for the previously mentioned [`type`](#parameterType). See [Data Type Formats](https://swagger.io/specification/v2/#dataTypeFormat) for further details. collectionFormat | `string` |Determines the format of the array if type array is used. Possible values are:

  • `csv` - comma separated values `foo,bar`.
  • `ssv` - space separated values `foo bar`.
  • `tsv` - tab separated values `foo\tbar`.
  • `pipes` - pipe separated values `foo|bar`.
  • `multi` - corresponds to multiple parameter instances instead of multiple values for a single instance `foo=bar&foo=baz`. This is valid only for parameters [`in`](#parameterIn) "query" or "formData".
Default value is `csv`. +extensions | `string` | Add extension to parameters. ### Future Field Name | Type | Description ---|:---:|--- -multipleOf | `number` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.1.1. pattern | `string` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. maxItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.2. minItems | `integer` | See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.3.3. @@ -548,21 +570,35 @@ type DeepObject struct { //in `proto` package ### Add a headers in response ```go -// @Success 200 {string} string "ok" -// @Header 200 {string} Location "/entity/1" -// @Header 200 {string} Token "qwerty" +// @Success 200 {string} string "ok" +// @failure 400 {string} string "error" +// @response default {string} string "other error" +// @Header 200 {string} Location "/entity/1" +// @Header 200,400,default {string} Token "token" +// @Header all {string} Token2 "token2" ``` ### Use multiple path params ```go /// ... -// @Param group_id path int true "Group ID" +// @Param group_id path int true "Group ID" // @Param account_id path int true "Account ID" // ... // @Router /examples/groups/{group_id}/accounts/{account_id} [get] ``` +### Add multiple paths + +```go +/// ... +// @Param group_id path int true "Group ID" +// @Param user_id path int true "User ID" +// ... +// @Router /examples/groups/{group_id}/user/{user_id}/address [put] +// @Router /examples/user/{user_id}/address [put] +``` + ### Example value of struct ```go @@ -662,7 +698,7 @@ type Account struct { ```go type Account struct { - ID string `json:"id" extensions:"x-nullable,x-abc=def"` // extensions fields must start with "x-" + ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // extensions fields must start with "x-" } ``` @@ -675,7 +711,8 @@ generate swagger doc as follows: "id": { "type": "string", "x-nullable": true, - "x-abc": "def" + "x-abc": "def", + "x-omitempty": false } } } @@ -688,7 +725,7 @@ type Resp struct { }//@name Response ``` -### How to using security annotations +### How to use security annotations General API info. @@ -714,6 +751,17 @@ Make it AND condition // @Security OAuth2Application[write, admin] ``` +### Add a description for enum items + +```go +type Example struct { + // Sort order: + // * asc - Ascending, from A to Z. + // * desc - Descending, from Z to A. + Order string `enums:"asc,desc"` +} +``` + ## About the Project This project was inspired by [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) but we simplified the usage and added support a variety of [web frameworks](#supported-web-frameworks). Gopher image source is [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers). It has licenses [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en). ## Contributors diff --git a/vendor/github.com/swaggo/swag/README_zh-CN.md b/vendor/github.com/swaggo/swag/README_zh-CN.md index d14f4985..61c3ae6c 100644 --- a/vendor/github.com/swaggo/swag/README_zh-CN.md +++ b/vendor/github.com/swaggo/swag/README_zh-CN.md @@ -20,6 +20,7 @@ Swag将Go的注释转换为Swagger2.0文档。我们为流行的 [Go Web Framewo - [快速开始](#快速开始) - [支持的Web框架](#支持的web框架) - [如何与Gin集成](#如何与gin集成) +- [格式化说明](#格式化说明) - [开发现状](#开发现状) - [声明式注释格式](#声明式注释格式) - [通用API信息](#通用api信息) @@ -46,10 +47,13 @@ Swag将Go的注释转换为Swagger2.0文档。我们为流行的 [Go Web Framewo 2. 
使用如下命令下载swag: ```bash -go get -u github.com/swaggo/swag/cmd/swag +$ go get -u github.com/swaggo/swag/cmd/swag + +# 1.16 及以上版本 +$ go install github.com/swaggo/swag/cmd/swag@latest ``` -从源码开始构建的话,需要有Go环境(1.9及以上版本)。 +从源码开始构建的话,需要有Go环境(1.13及以上版本)。 或者从github的release页面下载预编译好的二进制文件。 @@ -65,6 +69,12 @@ swag init swag init -g http/api.go ``` +4. (可选) 使用`fmt`格式化 SWAG 注释。(请先升级到最新版本) + +```bash +swag fmt +``` + ## swag cli ```bash @@ -76,14 +86,35 @@ USAGE: swag init [command options] [arguments...] OPTIONS: - --generalInfo value, -g value API通用信息所在的go源文件路径,如果是相对路径则基于API解析目录 (默认: "main.go") - --dir value, -d value API解析目录 (默认: "./") - --propertyStrategy value, -p value 结构体字段命名规则,三种:snakecase,camelcase,pascalcase (默认: "camelcase") - --output value, -o value 文件(swagger.json, swagger.yaml and doc.go)输出目录 (默认: "./docs") - --parseVendor 是否解析vendor目录里的go源文件,默认不 - --parseDependency 是否解析依赖目录中的go源文件,默认不 - --markdownFiles value, --md value 指定API的描述信息所使用的markdown文件所在的目录 - --generatedTime 是否输出时间到输出文件docs.go的顶部,默认是 + --generalInfo value, -g value API通用信息所在的go源文件路径,如果是相对路径则基于API解析目录 (默认: "main.go") + --dir value, -d value API解析目录 (默认: "./") + --exclude value 解析扫描时排除的目录,多个目录可用逗号分隔(默认:空) + --propertyStrategy value, -p value 结构体字段命名规则,三种:snakecase,camelcase,pascalcase (默认: "camelcase") + --output value, -o value 文件(swagger.json, swagger.yaml and doc.go)输出目录 (默认: "./docs") + --parseVendor 是否解析vendor目录里的go源文件,默认不 + --parseDependency 是否解析依赖目录中的go源文件,默认不 + --markdownFiles value, --md value 指定API的描述信息所使用的markdown文件所在的目录 + --generatedTime 是否输出时间到输出文件docs.go的顶部,默认是 + --codeExampleFiles value, --cef value 解析包含用于 x-codeSamples 扩展的代码示例文件的文件夹,默认禁用 + --parseInternal 解析 internal 包中的go文件,默认禁用 + --parseDepth value 依赖解析深度 (默认: 100) + --instanceName value 设置文档实例名 (默认: "swagger") +``` + +```bash +swag fmt -h +NAME: + swag fmt - format swag comments + +USAGE: + swag fmt [command options] [arguments...] + +OPTIONS: + --dir value, -d value API解析目录 (默认: "./") + --exclude value 解析扫描时排除的目录,多个目录可用逗号分隔(默认:空) + --generalInfo value, -g value API通用信息所在的go源文件路径,如果是相对路径则基于API解析目录 (默认: "main.go") + --help, -h show help (default: false) + ``` ## 支持的Web框架 @@ -106,52 +137,23 @@ import "github.com/swaggo/files" // swagger embed files 2. 在`main.go`源代码中添加通用的API注释: -```bash -// @title Swagger Example API -// @version 1.0 -// @description This is a sample server celler server. -// @termsOfService http://swagger.io/terms/ - -// @contact.name API Support -// @contact.url http://www.swagger.io/support -// @contact.email support@swagger.io - -// @license.name Apache 2.0 -// @license.url http://www.apache.org/licenses/LICENSE-2.0.html - -// @host localhost:8080 -// @BasePath /api/v1 -// @query.collection.format multi - -// @securityDefinitions.basic BasicAuth - -// @securityDefinitions.apikey ApiKeyAuth -// @in header -// @name Authorization - -// @securitydefinitions.oauth2.application OAuth2Application -// @tokenUrl https://example.com/oauth/token -// @scope.write Grants write access -// @scope.admin Grants read and write access to administrative information - -// @securitydefinitions.oauth2.implicit OAuth2Implicit -// @authorizationurl https://example.com/oauth/authorize -// @scope.write Grants write access -// @scope.admin Grants read and write access to administrative information +```go +// @title Swagger Example API +// @version 1.0 +// @description This is a sample server celler server. 
+// @termsOfService http://swagger.io/terms/ -// @securitydefinitions.oauth2.password OAuth2Password -// @tokenUrl https://example.com/oauth/token -// @scope.read Grants read access -// @scope.write Grants write access -// @scope.admin Grants read and write access to administrative information +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io -// @securitydefinitions.oauth2.accessCode OAuth2AccessCode -// @tokenUrl https://example.com/oauth/token -// @authorizationurl https://example.com/oauth/authorize -// @scope.admin Grants read and write access to administrative information +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html -// @x-extension-openapi {"example": "value on a json format"} +// @host localhost:8080 +// @BasePath /api/v1 +// @securityDefinitions.basic BasicAuth func main() { r := gin.Default() @@ -189,15 +191,12 @@ import ( "./docs" // docs is generated by Swag CLI, you have to import it. ) -// @contact.name API Support -// @contact.url http://www.swagger.io/support -// @contact.email support@swagger.io - -// @license.name Apache 2.0 -// @license.url http://www.apache.org/licenses/LICENSE-2.0.html - -// @termsOfService http://swagger.io/terms/ +// @contact.name API Support +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html func main() { // programatically set swagger info @@ -233,55 +232,53 @@ import ( ) // ShowAccount godoc -// @Summary Show a account -// @Description get string by ID -// @ID get-string-by-int -// @Accept json -// @Produce json -// @Param id path int true "Account ID" -// @Success 200 {object} model.Account -// @Header 200 {string} Token "qwerty" -// @Failure 400 {object} httputil.HTTPError -// @Failure 404 {object} httputil.HTTPError -// @Failure 500 {object} httputil.HTTPError -// @Router /accounts/{id} [get] +// @Summary Show an account +// @Description get string by ID +// @Tags accounts +// @Accept json +// @Produce json +// @Param id path int true "Account ID" +// @Success 200 {object} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts/{id} [get] func (c *Controller) ShowAccount(ctx *gin.Context) { - id := ctx.Param("id") - aid, err := strconv.Atoi(id) - if err != nil { - httputil.NewError(ctx, http.StatusBadRequest, err) - return - } - account, err := model.AccountOne(aid) - if err != nil { - httputil.NewError(ctx, http.StatusNotFound, err) - return - } - ctx.JSON(http.StatusOK, account) + id := ctx.Param("id") + aid, err := strconv.Atoi(id) + if err != nil { + httputil.NewError(ctx, http.StatusBadRequest, err) + return + } + account, err := model.AccountOne(aid) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, account) } // ListAccounts godoc -// @Summary List accounts -// @Description get accounts -// @Accept json -// @Produce json -// @Param q query string false "name search by q" -// @Success 200 {array} model.Account -// @Header 200 {string} Token "qwerty" -// @Failure 400 {object} httputil.HTTPError -// @Failure 404 {object} httputil.HTTPError -// @Failure 500 {object} httputil.HTTPError -// @Router /accounts [get] +// @Summary List accounts +// @Description get accounts +// @Tags accounts +// @Accept json +// @Produce json +// @Param q 
query string false "name search by q" Format(email) +// @Success 200 {array} model.Account +// @Failure 400 {object} httputil.HTTPError +// @Failure 404 {object} httputil.HTTPError +// @Failure 500 {object} httputil.HTTPError +// @Router /accounts [get] func (c *Controller) ListAccounts(ctx *gin.Context) { - q := ctx.Request.URL.Query().Get("q") - accounts, err := model.AccountsAll(q) - if err != nil { - httputil.NewError(ctx, http.StatusNotFound, err) - return - } - ctx.JSON(http.StatusOK, accounts) + q := ctx.Request.URL.Query().Get("q") + accounts, err := model.AccountsAll(q) + if err != nil { + httputil.NewError(ctx, http.StatusNotFound, err) + return + } + ctx.JSON(http.StatusOK, accounts) } - //... ``` @@ -289,10 +286,25 @@ func (c *Controller) ListAccounts(ctx *gin.Context) { swag init ``` -4. 运行程序,然后在浏览器中访问 http://localhost:8080/swagger/index.html。将看到Swagger 2.0 Api文档,如下所示: +4. 运行程序,然后在浏览器中访问 http://localhost:8080/swagger/index.html 。将看到Swagger 2.0 Api文档,如下所示: ![swagger_index.html](https://raw.githubusercontent.com/swaggo/swag/master/assets/swagger-image.png) +## 格式化说明 + +可以针对Swag的注释自动格式化,就像`go fmt`。 +此处查看格式化结果 [here](https://github.com/swaggo/swag/tree/master/example/celler). + +示例: +```shell +swag fmt +``` + +排除目录(不扫描)示例: +```shell +swag fmt -d ./ --exclude ./internal +``` + ## 开发现状 [Swagger 2.0 文档](https://swagger.io/docs/specification/2-0/basic-structure/) @@ -336,6 +348,8 @@ swag init | license.url | 用于API的许可证的URL。 必须采用网址格式。 | // @license.url http://www.apache.org/licenses/LICENSE-2.0.html | | host | 运行API的主机(主机名或IP地址)。 | // @host localhost:8080 | | BasePath | 运行API的基本路径。 | // @BasePath /api/v1 | +| accept | API 可以使用的 MIME 类型列表。 请注意,Accept 仅影响具有请求正文的操作,例如 POST、PUT 和 PATCH。 值必须如“[Mime类型](#mime-types)”中所述。 | // @accept json | +| produce | API可以生成的MIME类型的列表。值必须如“[Mime类型](#mime-types)”中所述。 | // @produce json | | query.collection.format | 请求URI query里数组参数的默认格式:csv,multi,pipes,tsv,ssv。 如果未设置,则默认为csv。 | // @query.collection.format multi | | schemes | 用空格分隔的请求的传输协议。 | // @schemes http https | | x-name | 扩展的键必须以x-开头,并且只能使用json值 | // @x-example-key {"key": "value"} | @@ -356,26 +370,27 @@ swag init Example [celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller) -| 注释 | 描述 | | -| -------------------- | ------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | +| 注释 | 描述 | +| -------------------- | ------------------------------------------------------------------------------------------------------- | | description | 操作行为的详细说明。 | -| description.markdown | 应用程序的简短描述。该描述将从名为`endpointname.md`的文件中读取。 | // @description.file endpoint.description.markdown | +| description.markdown | 应用程序的简短描述。该描述将从名为`endpointname.md`的文件中读取。 | | id | 用于标识操作的唯一字符串。在所有API操作中必须唯一。 | | tags | 每个API操作的标签列表,以逗号分隔。 | | summary | 该操作的简短摘要。 | -| accept | API可以使用的MIME类型的列表。值必须如“[Mime类型](#mime-types)”中所述。 | +| accept | API 可以使用的 MIME 类型列表。 请注意,Accept 仅影响具有请求正文的操作,例如 POST、PUT 和 PATCH。 值必须如“[Mime类型](#mime-types)”中所述。 | | produce | API可以生成的MIME类型的列表。值必须如“[Mime类型](#mime-types)”中所述。 | | param | 用空格分隔的参数。`param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | -| security | 每个API操作的[安全性](#security)。 | +| security | 每个API操作的[安全性](#安全性)。 | | success | 以空格分隔的成功响应。`return code`,`{param type}`,`data type`,`comment` | | failure | 以空格分隔的故障响应。`return code`,`{param type}`,`data type`,`comment` | +| response | 与success、failure作用相同 | | header | 以空格分隔的头字段。 `return code`,`{param 
type}`,`data type`,`comment` | | router | 以空格分隔的路径定义。 `path`,`[httpMethod]` | | x-name | 扩展字段必须以`x-`开头,并且只能使用json值。 | ## Mime类型 -`swag` g接受所有格式正确的MIME类型, 即使匹配 `*/*`。除此之外,`swag`还接受某些MIME类型的别名,如下所示: +`swag` 接受所有格式正确的MIME类型, 即使匹配 `*/*`。除此之外,`swag`还接受某些MIME类型的别名,如下所示: | Alias | MIME Type | | --------------------- | --------------------------------- | @@ -430,13 +445,14 @@ Example [celler/controller](https://github.com/swaggo/swag/tree/master/example/c ## 属性 ```go -// @Param enumstring query string false "string enums" Enums(A, B, C) -// @Param enumint query int false "int enums" Enums(1, 2, 3) -// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3) -// @Param string query string false "string valid" minlength(5) maxlength(10) -// @Param int query int false "int valid" mininum(1) maxinum(10) -// @Param default query string false "string default" default(A) -// @Param collection query []string false "string collection" collectionFormat(multi) +// @Param enumstring query string false "string enums" Enums(A, B, C) +// @Param enumint query int false "int enums" Enums(1, 2, 3) +// @Param enumnumber query number false "int enums" Enums(1.1, 1.2, 1.3) +// @Param string query string false "string valid" minlength(5) maxlength(10) +// @Param int query int false "int valid" minimum(1) maximum(10) +// @Param default query string false "string default" default(A) +// @Param collection query []string false "string collection" collectionFormat(multi) +// @Param extensions query []string false "string collection" extensions(x-example=test,x-nullable) ``` 也适用于结构体字段: @@ -535,17 +551,20 @@ type Order struct { //in `proto` package ### 在响应中增加头字段 ```go -// @Success 200 {string} string "ok" -// @Header 200 {string} Location "/entity/1" -// @Header 200 {string} Token "qwerty" +// @Success 200 {string} string "ok" +// @failure 400 {string} string "error" +// @response default {string} string "other error" +// @Header 200 {string} Location "/entity/1" +// @Header 200,400,default {string} Token "token" +// @Header all {string} Token2 "token2" ``` ### 使用多路径参数 ```go /// ... -// @Param group_id path int true "Group ID" -// @Param account_id path int true "Account ID" +// @Param group_id path int true "Group ID" +// @Param account_id path int true "Account ID" // ... // @Router /examples/groups/{group_id}/accounts/{account_id} [get] ``` @@ -652,7 +671,7 @@ type Account struct { ```go type Account struct { - ID string `json:"id" extensions:"x-nullable,x-abc=def"` // 扩展字段必须以"x-"开头 + ID string `json:"id" extensions:"x-nullable,x-abc=def,!x-omitempty"` // 扩展字段必须以"x-"开头 } ``` @@ -665,7 +684,8 @@ type Account struct { "id": { "type": "string", "x-nullable": true, - "x-abc": "def" + "x-abc": "def", + "x-omitempty": false } } } diff --git a/vendor/github.com/swaggo/swag/debug.go b/vendor/github.com/swaggo/swag/debug.go deleted file mode 100644 index f28a8e65..00000000 --- a/vendor/github.com/swaggo/swag/debug.go +++ /dev/null @@ -1,30 +0,0 @@ -package swag - -import ( - "log" -) - -const ( - test = iota - release -) - -var swagMode = release - -func isRelease() bool { - return swagMode == release -} - -// Println calls Output to print to the standard logger when release mode. -func Println(v ...interface{}) { - if isRelease() { - log.Println(v...) - } -} - -// Printf calls Output to print to the standard logger when release mode. -func Printf(format string, v ...interface{}) { - if isRelease() { - log.Printf(format, v...) 
- } -} diff --git a/vendor/github.com/swaggo/swag/field_parser.go b/vendor/github.com/swaggo/swag/field_parser.go new file mode 100644 index 00000000..500e2f4e --- /dev/null +++ b/vendor/github.com/swaggo/swag/field_parser.go @@ -0,0 +1,528 @@ +package swag + +import ( + "fmt" + "go/ast" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/go-openapi/spec" +) + +var _ FieldParser = &tagBaseFieldParser{} + +type tagBaseFieldParser struct { + p *Parser + field *ast.Field + tag reflect.StructTag +} + +func newTagBaseFieldParser(p *Parser, field *ast.Field) FieldParser { + ps := &tagBaseFieldParser{ + p: p, + field: field, + } + if ps.field.Tag != nil { + ps.tag = reflect.StructTag(strings.Replace(field.Tag.Value, "`", "", -1)) + } + + return ps +} + +func (ps *tagBaseFieldParser) ShouldSkip() (bool, error) { + // Skip non-exported fields. + if !ast.IsExported(ps.field.Names[0].Name) { + return true, nil + } + + if ps.field.Tag == nil { + return false, nil + } + + ignoreTag := ps.tag.Get("swaggerignore") + if strings.EqualFold(ignoreTag, "true") { + return true, nil + } + + // json:"tag,hoge" + name := strings.TrimSpace(strings.Split(ps.tag.Get("json"), ",")[0]) + if name == "-" { + return true, nil + } + + return false, nil +} + +func (ps *tagBaseFieldParser) FieldName() (string, error) { + var name string + if ps.field.Tag != nil { + // json:"tag,hoge" + name = strings.TrimSpace(strings.Split(ps.tag.Get("json"), ",")[0]) + + if name != "" { + return name, nil + } + } + + switch ps.p.PropNamingStrategy { + case SnakeCase: + return toSnakeCase(ps.field.Names[0].Name), nil + case PascalCase: + return ps.field.Names[0].Name, nil + default: + return toLowerCamelCase(ps.field.Names[0].Name), nil + } +} + +func toSnakeCase(in string) string { + runes := []rune(in) + length := len(runes) + + var out []rune + for i := 0; i < length; i++ { + if i > 0 && unicode.IsUpper(runes[i]) && + ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) { + out = append(out, '_') + } + out = append(out, unicode.ToLower(runes[i])) + } + + return string(out) +} + +func toLowerCamelCase(in string) string { + runes := []rune(in) + + var out []rune + flag := false + for i, curr := range runes { + if (i == 0 && unicode.IsUpper(curr)) || (flag && unicode.IsUpper(curr)) { + out = append(out, unicode.ToLower(curr)) + flag = true + } else { + out = append(out, curr) + flag = false + } + } + + return string(out) +} + +func (ps *tagBaseFieldParser) CustomSchema() (*spec.Schema, error) { + if ps.field.Tag == nil { + return nil, nil + } + + typeTag := ps.tag.Get("swaggertype") + if typeTag != "" { + return BuildCustomSchema(strings.Split(typeTag, ",")) + } + + return nil, nil +} + +type structField struct { + desc string + schemaType string + arrayType string + formatType string + maximum *float64 + minimum *float64 + multipleOf *float64 + maxLength *int64 + minLength *int64 + maxItems *int64 + minItems *int64 + exampleValue interface{} + defaultValue interface{} + extensions map[string]interface{} + enums []interface{} + readOnly bool + unique bool +} + +func (ps *tagBaseFieldParser) ComplementSchema(schema *spec.Schema) error { + types := ps.p.GetSchemaTypePath(schema, 2) + if len(types) == 0 { + return fmt.Errorf("invalid type for field: %s", ps.field.Names[0]) + } + + if ps.field.Tag == nil { + if ps.field.Doc != nil { + schema.Description = strings.TrimSpace(ps.field.Doc.Text()) + } + if schema.Description == "" && ps.field.Comment != nil { + schema.Description = 
strings.TrimSpace(ps.field.Comment.Text()) + } + return nil + } + + structField := &structField{ + schemaType: types[0], + formatType: ps.tag.Get("format"), + readOnly: ps.tag.Get("readonly") == "true", + } + + if len(types) > 1 && (types[0] == ARRAY || types[0] == OBJECT) { + structField.arrayType = types[1] + } + + if ps.field.Doc != nil { + structField.desc = strings.TrimSpace(ps.field.Doc.Text()) + } + if structField.desc == "" && ps.field.Comment != nil { + structField.desc = strings.TrimSpace(ps.field.Comment.Text()) + } + + jsonTag := ps.tag.Get("json") + // json:"name,string" or json:",string" + + exampleTag := ps.tag.Get("example") + if exampleTag != "" { + structField.exampleValue = exampleTag + if !strings.Contains(jsonTag, ",string") { + example, err := defineTypeOfExample(structField.schemaType, structField.arrayType, exampleTag) + if err != nil { + return err + } + structField.exampleValue = example + } + } + + bindingTag := ps.tag.Get("binding") + if bindingTag != "" { + ps.parseValidTags(bindingTag, structField) + } + + validateTag := ps.tag.Get("validate") + if validateTag != "" { + ps.parseValidTags(validateTag, structField) + } + + extensionsTag := ps.tag.Get("extensions") + if extensionsTag != "" { + structField.extensions = map[string]interface{}{} + for _, val := range strings.Split(extensionsTag, ",") { + parts := strings.SplitN(val, "=", 2) + if len(parts) == 2 { + structField.extensions[parts[0]] = parts[1] + } else { + if len(parts[0]) > 0 && string(parts[0][0]) == "!" { + structField.extensions[parts[0][1:]] = false + } else { + structField.extensions[parts[0]] = true + } + } + } + } + + enumsTag := ps.tag.Get("enums") + if enumsTag != "" { + enumType := structField.schemaType + if structField.schemaType == ARRAY { + enumType = structField.arrayType + } + + structField.enums = nil + for _, e := range strings.Split(enumsTag, ",") { + value, err := defineType(enumType, e) + if err != nil { + return err + } + structField.enums = append(structField.enums, value) + } + } + + defaultTag := ps.tag.Get("default") + if defaultTag != "" { + value, err := defineType(structField.schemaType, defaultTag) + if err != nil { + return err + } + structField.defaultValue = value + } + + if IsNumericType(structField.schemaType) || IsNumericType(structField.arrayType) { + maximum, err := getFloatTag(ps.tag, "maximum") + if err != nil { + return err + } + if maximum != nil { + structField.maximum = maximum + } + + minimum, err := getFloatTag(ps.tag, "minimum") + if err != nil { + return err + } + if minimum != nil { + structField.minimum = minimum + } + + multipleOf, err := getFloatTag(ps.tag, "multipleOf") + if err != nil { + return err + } + if multipleOf != nil { + structField.multipleOf = multipleOf + } + } + + if structField.schemaType == STRING || structField.arrayType == STRING { + maxLength, err := getIntTag(ps.tag, "maxLength") + if err != nil { + return err + } + if maxLength != nil { + structField.maxLength = maxLength + } + + minLength, err := getIntTag(ps.tag, "minLength") + if err != nil { + return err + } + if minLength != nil { + structField.minLength = minLength + } + } + + // perform this after setting everything else (min, max, etc...) + if strings.Contains(jsonTag, ",string") { // @encoding/json: "It applies only to fields of string, floating point, integer, or boolean types." 
+ defaultValues := map[string]string{ + // Zero Values as string + STRING: "", + INTEGER: "0", + BOOLEAN: "false", + NUMBER: "0", + } + + defaultValue, ok := defaultValues[structField.schemaType] + if ok { + structField.schemaType = STRING + + if structField.exampleValue == nil { + // if exampleValue is not defined by the user, + // we will force an example with a correct value + // (eg: int->"0", bool:"false") + structField.exampleValue = defaultValue + } + } + } + + if structField.schemaType == STRING && types[0] != STRING { + *schema = *PrimitiveSchema(structField.schemaType) + } + + schema.Description = structField.desc + schema.ReadOnly = structField.readOnly + if !reflect.ValueOf(schema.Ref).IsZero() && schema.ReadOnly { + schema.AllOf = []spec.Schema{*spec.RefSchema(schema.Ref.String())} + schema.Ref = spec.Ref{} // clear out existing ref + } + schema.Default = structField.defaultValue + schema.Example = structField.exampleValue + if structField.schemaType != ARRAY { + schema.Format = structField.formatType + } + schema.Extensions = structField.extensions + eleSchema := schema + if structField.schemaType == ARRAY { + // For Array only + schema.MaxItems = structField.maxItems + schema.MinItems = structField.minItems + schema.UniqueItems = structField.unique + + eleSchema = schema.Items.Schema + eleSchema.Format = structField.formatType + } + eleSchema.Maximum = structField.maximum + eleSchema.Minimum = structField.minimum + eleSchema.MultipleOf = structField.multipleOf + eleSchema.MaxLength = structField.maxLength + eleSchema.MinLength = structField.minLength + eleSchema.Enum = structField.enums + + return nil +} + +func getFloatTag(structTag reflect.StructTag, tagName string) (*float64, error) { + strValue := structTag.Get(tagName) + if strValue == "" { + return nil, nil + } + + value, err := strconv.ParseFloat(strValue, 64) + if err != nil { + return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) + } + + return &value, nil +} + +func getIntTag(structTag reflect.StructTag, tagName string) (*int64, error) { + strValue := structTag.Get(tagName) + if strValue == "" { + return nil, nil + } + + value, err := strconv.ParseInt(strValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) + } + + return &value, nil +} + +func (ps *tagBaseFieldParser) IsRequired() (bool, error) { + if ps.field.Tag == nil { + return false, nil + } + + bindingTag := ps.tag.Get("binding") + if bindingTag != "" { + for _, val := range strings.Split(bindingTag, ",") { + if val == "required" { + return true, nil + } + } + } + + validateTag := ps.tag.Get("validate") + if validateTag != "" { + for _, val := range strings.Split(validateTag, ",") { + if val == "required" { + return true, nil + } + } + } + + return false, nil +} + +func (ps *tagBaseFieldParser) parseValidTags(validTag string, sf *structField) { + // `validate:"required,max=10,min=1"` + // ps. required checked by IsRequired(). 
+ for _, val := range strings.Split(validTag, ",") { + var ( + valKey string + valValue string + ) + kv := strings.Split(val, "=") + switch len(kv) { + case 1: + valKey = kv[0] + case 2: + valKey = kv[0] + valValue = kv[1] + default: + continue + } + valValue = strings.Replace(strings.Replace(valValue, utf8HexComma, ",", -1), utf8Pipe, "|", -1) + + switch valKey { + case "max", "lte": + sf.setMax(valValue) + case "min", "gte": + sf.setMin(valValue) + case "oneof": + sf.setOneOf(valValue) + case "unique": + if sf.schemaType == ARRAY { + sf.unique = true + } + case "dive": + // ignore dive + return + default: + continue + } + } +} + +func (sf *structField) setOneOf(valValue string) { + if len(sf.enums) != 0 { + return + } + + enumType := sf.schemaType + if sf.schemaType == ARRAY { + enumType = sf.arrayType + } + + valValues := parseOneOfParam2(valValue) + for i := range valValues { + value, err := defineType(enumType, valValues[i]) + if err != nil { + continue + } + sf.enums = append(sf.enums, value) + } +} + +func (sf *structField) setMin(valValue string) { + value, err := strconv.ParseFloat(valValue, 64) + if err != nil { + return + } + switch sf.schemaType { + case INTEGER, NUMBER: + sf.minimum = &value + case STRING: + intValue := int64(value) + sf.minLength = &intValue + case ARRAY: + intValue := int64(value) + sf.minItems = &intValue + } +} + +func (sf *structField) setMax(valValue string) { + value, err := strconv.ParseFloat(valValue, 64) + if err != nil { + return + } + switch sf.schemaType { + case INTEGER, NUMBER: + sf.maximum = &value + case STRING: + intValue := int64(value) + sf.maxLength = &intValue + case ARRAY: + intValue := int64(value) + sf.maxItems = &intValue + } +} + +const ( + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" +) + +// These code copy from +// https://github.com/go-playground/validator/blob/d4271985b44b735c6f76abc7a06532ee997f9476/baked_in.go#L207 +// --- +var oneofValsCache = map[string][]string{} +var oneofValsCacheRWLock = sync.RWMutex{} +var splitParamsRegex = regexp.MustCompile(`'[^']*'|\S+`) + +func parseOneOfParam2(s string) []string { + oneofValsCacheRWLock.RLock() + values, ok := oneofValsCache[s] + oneofValsCacheRWLock.RUnlock() + if !ok { + oneofValsCacheRWLock.Lock() + values = splitParamsRegex.FindAllString(s, -1) + for i := 0; i < len(values); i++ { + values[i] = strings.Replace(values[i], "'", "", -1) + } + oneofValsCache[s] = values + oneofValsCacheRWLock.Unlock() + } + return values +} + +// --- diff --git a/vendor/github.com/swaggo/swag/formater.go b/vendor/github.com/swaggo/swag/formater.go new file mode 100644 index 00000000..d9af1dd2 --- /dev/null +++ b/vendor/github.com/swaggo/swag/formater.go @@ -0,0 +1,346 @@ +package swag + +import ( + "bytes" + "crypto/md5" + "fmt" + "go/ast" + goparser "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "text/tabwriter" +) + +const splitTag = "&*" + +// Formater implements a formater for Go source files. +type Formater struct { + // debugging output goes here + debug Debugger + + // excludes excludes dirs and files in SearchDir + excludes map[string]bool + + mainFile string +} + +// NewFormater create a new formater +func NewFormater() *Formater { + formater := &Formater{ + debug: log.New(os.Stdout, "", log.LstdFlags), + excludes: make(map[string]bool), + } + return formater +} + +// FormatAPI format the swag comment. 
+func (f *Formater) FormatAPI(searchDir, excludeDir, mainFile string) error { + searchDirs := strings.Split(searchDir, ",") + for _, searchDir := range searchDirs { + if _, err := os.Stat(searchDir); os.IsNotExist(err) { + return fmt.Errorf("dir: %s does not exist", searchDir) + } + } + for _, fi := range strings.Split(excludeDir, ",") { + fi = strings.TrimSpace(fi) + if fi != "" { + fi = filepath.Clean(fi) + f.excludes[fi] = true + } + } + + // parse main.go + absMainAPIFilePath, err := filepath.Abs(filepath.Join(searchDirs[0], mainFile)) + if err != nil { + return err + } + err = f.FormatMain(absMainAPIFilePath) + if err != nil { + return err + } + f.mainFile = mainFile + + err = f.formatMultiSearchDir(searchDirs) + if err != nil { + return err + } + + return nil +} + +func (f *Formater) formatMultiSearchDir(searchDirs []string) error { + for _, searchDir := range searchDirs { + f.debug.Printf("Format API Info, search dir:%s", searchDir) + + err := filepath.Walk(searchDir, f.visit) + if err != nil { + return err + } + } + return nil +} + +func (f *Formater) visit(path string, fileInfo os.FileInfo, err error) error { + if err := f.skip(path, fileInfo); err != nil { + return err + } else if fileInfo.IsDir() { + // skip if file is folder + return nil + } + + if strings.HasSuffix(strings.ToLower(path), "_test.go") || filepath.Ext(path) != ".go" { + // skip if file not has suffix "*.go" + return nil + } + if strings.HasSuffix(strings.ToLower(path), f.mainFile) { + // skip main file + return nil + } + + err = f.FormatFile(path) + if err != nil { + return fmt.Errorf("ParseFile error:%+v", err) + } + return nil +} + +// skip skip folder in ('vendor' 'docs' 'excludes' 'hidden folder') +func (f *Formater) skip(path string, fileInfo os.FileInfo) error { + if fileInfo.IsDir() { + if fileInfo.Name() == "vendor" || // ignore "vendor" + fileInfo.Name() == "docs" || // exclude docs + len(fileInfo.Name()) > 1 && fileInfo.Name()[0] == '.' { // exclude all hidden folder + return filepath.SkipDir + } + + if f.excludes != nil { + if _, ok := f.excludes[path]; ok { + return filepath.SkipDir + } + } + } + return nil +} + +// FormatMain format the main.go comment +func (f *Formater) FormatMain(mainFilepath string) error { + fileSet := token.NewFileSet() + astFile, err := goparser.ParseFile(fileSet, mainFilepath, nil, goparser.ParseComments) + if err != nil { + return fmt.Errorf("cannot format file, err: %w path : %s ", err, mainFilepath) + } + var ( + formatedComments = bytes.Buffer{} + // CommentCache + oldCommentsMap = make(map[string]string) + ) + + if astFile.Comments != nil { + for _, comment := range astFile.Comments { + formatFuncDoc(comment.List, &formatedComments, oldCommentsMap) + } + } + + return writeFormatedComments(mainFilepath, formatedComments, oldCommentsMap) +} + +// FormatFile format the swag comment in go function. 
+func (f *Formater) FormatFile(filepath string) error { + fileSet := token.NewFileSet() + astFile, err := goparser.ParseFile(fileSet, filepath, nil, goparser.ParseComments) + if err != nil { + return fmt.Errorf("cannot format file, err: %w path : %s ", err, filepath) + } + + var ( + formatedComments = bytes.Buffer{} + // CommentCache + oldCommentsMap = make(map[string]string) + ) + + for _, astDescription := range astFile.Decls { + astDeclaration, ok := astDescription.(*ast.FuncDecl) + if ok && astDeclaration.Doc != nil && astDeclaration.Doc.List != nil { + formatFuncDoc(astDeclaration.Doc.List, &formatedComments, oldCommentsMap) + } + } + + return writeFormatedComments(filepath, formatedComments, oldCommentsMap) +} + +func writeFormatedComments(filepath string, formatedComments bytes.Buffer, oldCommentsMap map[string]string) error { + // Replace the file + // Read the file + srcBytes, err := ioutil.ReadFile(filepath) + if err != nil { + return fmt.Errorf("cannot open file, err: %w path : %s ", err, filepath) + } + replaceSrc := string(srcBytes) + newComments := strings.Split(formatedComments.String(), "\n") + for _, e := range newComments { + commentSplit := strings.Split(e, splitTag) + if len(commentSplit) == 2 { + commentHash, commentContent := commentSplit[0], commentSplit[1] + + if !isBlankComment(commentContent) { + oldComment := oldCommentsMap[commentHash] + if strings.Contains(replaceSrc, oldComment) { + replaceSrc = strings.Replace(replaceSrc, oldComment, commentContent, 1) + } + } + } + } + return writeBack(filepath, []byte(replaceSrc), srcBytes) +} + +func formatFuncDoc(commentList []*ast.Comment, formatedComments *bytes.Buffer, oldCommentsMap map[string]string) { + tabw := tabwriter.NewWriter(formatedComments, 0, 0, 2, ' ', 0) + + for _, comment := range commentList { + commentLine := comment.Text + if isSwagComment(commentLine) || isBlankComment(commentLine) { + cmd5 := fmt.Sprintf("%x", md5.Sum([]byte(commentLine))) + + // Find the separator and replace to \t + c := separatorFinder(commentLine, '\t') + oldCommentsMap[cmd5] = commentLine + + // md5 + splitTag + srcCommentLine + // eg. 
xxx&*@Description get struct array + _, _ = fmt.Fprintln(tabw, cmd5+splitTag+c) + } + } + // format by tabwriter + _ = tabw.Flush() +} + +// Check of @Param @Success @Failure @Response @Header +var specialTagForSplit = map[string]byte{ + "@param": 1, + "@success": 1, + "@failure": 1, + "@response": 1, + "@header": 1, +} + +var skipChar = map[byte]byte{ + '"': 1, + '(': 1, + '{': 1, + '[': 1, +} + +var skipCharEnd = map[byte]byte{ + '"': 1, + ')': 1, + '}': 1, + ']': 1, +} + +func separatorFinder(comment string, rp byte) string { + commentBytes := []byte(comment) + commentLine := strings.TrimSpace(strings.TrimLeft(comment, "/")) + if len(commentLine) == 0 { + return "" + } + attribute := strings.Fields(commentLine)[0] + attrLen := strings.Index(comment, attribute) + len(attribute) + attribute = strings.ToLower(attribute) + var i = attrLen + + if _, ok := specialTagForSplit[attribute]; ok { + var skipFlag bool + for ; i < len(commentBytes); i++ { + if !skipFlag && commentBytes[i] == ' ' { + j := i + for j < len(commentBytes) && commentBytes[j] == ' ' { + j++ + } + commentBytes = replaceRange(commentBytes, i, j, rp) + } + if _, ok := skipChar[commentBytes[i]]; ok && !skipFlag { + skipFlag = true + } else if _, ok := skipCharEnd[commentBytes[i]]; ok && skipFlag { + skipFlag = false + } + } + } else { + for i < len(commentBytes) && commentBytes[i] == ' ' { + i++ + } + if i >= len(commentBytes) { + return comment + } + commentBytes = replaceRange(commentBytes, attrLen, i, rp) + } + return string(commentBytes) +} + +func replaceRange(s []byte, start, end int, new byte) []byte { + if start > end || end < 1 { + return s + } + if end > len(s) { + end = len(s) + } + s = append(s[:start], s[end-1:]...) + s[start] = new + return s +} + +func isSwagComment(comment string) bool { + lc := strings.ToLower(comment) + return regexp.MustCompile("@[A-z]+").MatchString(lc) +} + +func isBlankComment(comment string) bool { + lc := strings.TrimSpace(comment) + return len(lc) == 0 +} + +// writeBack write to file +func writeBack(filepath string, src, old []byte) error { + // make a temporary backup before overwriting original + bakname, err := backupFile(filepath+".", old, 0644) + if err != nil { + return err + } + err = ioutil.WriteFile(filepath, src, 0644) + if err != nil { + _ = os.Rename(bakname, filepath) + return err + } + _ = os.Remove(bakname) + return nil +} + +const chmodSupported = runtime.GOOS != "windows" + +// backupFile writes data to a new file named filename with permissions perm, +// with 0 && - IsSimplePrimitiveType(prop.Items.Schema.Type[0]) { - param = createParameter(paramType, prop.Description, name, prop.Type[0], find(schema.Required, name)) + IsSimplePrimitiveType(prop.Items.Schema.Type[0]): + param = createParameter(paramType, prop.Description, name, prop.Type[0], findInSlice(schema.Required, name)) param.SimpleSchema.Type = prop.Type[0] if operation.parser != nil && operation.parser.collectionFormatInQuery != "" && param.CollectionFormat == "" { param.CollectionFormat = TransToValidCollectionFormat(operation.parser.collectionFormatInQuery) @@ -233,10 +296,11 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F Type: prop.Items.Schema.Type[0], }, } - } else if IsSimplePrimitiveType(prop.Type[0]) { - param = createParameter(paramType, prop.Description, name, prop.Type[0], find(schema.Required, name)) - } else { - Println(fmt.Sprintf("skip field [%s] in %s is not supported type for %s", name, refType, paramType)) + case IsSimplePrimitiveType(prop.Type[0]): + 
param = createParameter(paramType, prop.Description, name, prop.Type[0], findInSlice(schema.Required, name)) + default: + operation.parser.debug.Printf("skip field [%s] in %s is not supported type for %s", name, refType, paramType) + continue } param.Nullable = prop.Nullable @@ -258,17 +322,14 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F param.CommonValidations.Enum = prop.Enum operation.Operation.Parameters = append(operation.Operation.Parameters, param) } + return nil } case "body": - switch objectType { - case "primitive": - param.Schema.Type = spec.StringOrArray{refType} - case "array": - refType = "[]" + refType - fallthrough - case "object": - schema, err := operation.parseObjectSchema(refType, astFile) + if objectType == PRIMITIVE { + param.Schema = PrimitiveSchema(refType) + } else { + schema, err := operation.parseAPIObjectSchema(objectType, refType, astFile) if err != nil { return err } @@ -278,76 +339,46 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F return fmt.Errorf("%s is not supported paramType", paramType) } - if err := operation.parseAndExtractionParamAttribute(commentLine, objectType, refType, ¶m); err != nil { + err := operation.parseAndExtractionParamAttribute(commentLine, objectType, refType, ¶m) + if err != nil { return err } operation.Operation.Parameters = append(operation.Operation.Parameters, param) + return nil } -func (operation *Operation) registerSchemaType(schemaType string, astFile *ast.File) (string, *ast.TypeSpec, error) { - if !strings.ContainsRune(schemaType, '.') { - if astFile == nil { - return schemaType, nil, fmt.Errorf("no package name for type %s", schemaType) - } - schemaType = fullTypeName(astFile.Name.String(), schemaType) - } - refSplit := strings.Split(schemaType, ".") - pkgName := refSplit[0] - typeName := refSplit[1] - if typeSpec, ok := operation.parser.TypeDefinitions[pkgName][typeName]; ok { - operation.parser.registerTypes[schemaType] = typeSpec - return schemaType, typeSpec, nil - } - var typeSpec *ast.TypeSpec - if astFile == nil { - return schemaType, nil, fmt.Errorf("can not register schema type: %q reason: astFile == nil", schemaType) - } - for _, imp := range astFile.Imports { - if imp.Name != nil && imp.Name.Name == pkgName { // the import had an alias that matched - break - } - impPath := strings.Replace(imp.Path.Value, `"`, ``, -1) - if strings.HasSuffix(impPath, "/"+pkgName) { - var err error - typeSpec, err = findTypeDef(impPath, typeName) - if err != nil { - return schemaType, nil, fmt.Errorf("can not find type def: %q error: %s", schemaType, err) - } - break - } - } - - if typeSpec == nil { - return schemaType, nil, fmt.Errorf("can not find schema type: %q", schemaType) - } - - if _, ok := operation.parser.TypeDefinitions[pkgName]; !ok { - operation.parser.TypeDefinitions[pkgName] = make(map[string]*ast.TypeSpec) - } - - operation.parser.TypeDefinitions[pkgName][typeName] = typeSpec - operation.parser.registerTypes[schemaType] = typeSpec - return schemaType, typeSpec, nil -} +const ( + defaultTag = "default" + enumsTag = "enums" + formatTag = "format" + minimumTag = "minimum" + maximumTag = "maximum" + minLengthTag = "minlength" + maxLengthTag = "maxlength" + extensionsTag = "extensions" + collectionFormatTag = "collectionFormat" +) var regexAttributes = map[string]*regexp.Regexp{ // for Enums(A, B) - "enums": regexp.MustCompile(`(?i)\s+enums\(.*\)`), - // for Minimum(0) - "maxinum": regexp.MustCompile(`(?i)\s+maxinum\(.*\)`), - // for Maximum(0) - 
"mininum": regexp.MustCompile(`(?i)\s+mininum\(.*\)`), - // for Maximum(0) - "default": regexp.MustCompile(`(?i)\s+default\(.*\)`), + enumsTag: regexp.MustCompile(`(?i)\s+enums\(.*\)`), + // for maximum(0) + maximumTag: regexp.MustCompile(`(?i)\s+maxinum|maximum\(.*\)`), + // for minimum(0) + minimumTag: regexp.MustCompile(`(?i)\s+mininum|minimum\(.*\)`), + // for default(0) + defaultTag: regexp.MustCompile(`(?i)\s+default\(.*\)`), // for minlength(0) - "minlength": regexp.MustCompile(`(?i)\s+minlength\(.*\)`), + minLengthTag: regexp.MustCompile(`(?i)\s+minlength\(.*\)`), // for maxlength(0) - "maxlength": regexp.MustCompile(`(?i)\s+maxlength\(.*\)`), + maxLengthTag: regexp.MustCompile(`(?i)\s+maxlength\(.*\)`), // for format(email) - "format": regexp.MustCompile(`(?i)\s+format\(.*\)`), + formatTag: regexp.MustCompile(`(?i)\s+format\(.*\)`), + // for extensions(x-example=test) + extensionsTag: regexp.MustCompile(`(?i)\s+extensions\(.*\)`), // for collectionFormat(csv) - "collectionFormat": regexp.MustCompile(`(?i)\s+collectionFormat\(.*\)`), + collectionFormatTag: regexp.MustCompile(`(?i)\s+collectionFormat\(.*\)`), } func (operation *Operation) parseAndExtractionParamAttribute(commentLine, objectType, schemaType string, param *spec.Parameter) error { @@ -358,51 +389,26 @@ func (operation *Operation) parseAndExtractionParamAttribute(commentLine, object continue } switch attrKey { - case "enums": - err := setEnumParam(attr, schemaType, param) - if err != nil { - return err - } - case "maxinum": - n, err := setNumberParam(attrKey, schemaType, attr, commentLine) - if err != nil { - return err - } - param.Maximum = &n - case "mininum": - n, err := setNumberParam(attrKey, schemaType, attr, commentLine) - if err != nil { - return err - } - param.Minimum = &n - case "default": - value, err := defineType(schemaType, attr) - if err != nil { - return nil - } - param.Default = value - case "maxlength": - n, err := setStringParam(attrKey, schemaType, attr, commentLine) - if err != nil { - return err - } - param.MaxLength = &n - case "minlength": - n, err := setStringParam(attrKey, schemaType, attr, commentLine) - if err != nil { - return err - } - param.MinLength = &n - case "format": + case enumsTag: + err = setEnumParam(param, attr, objectType, schemaType) + case minimumTag, maximumTag: + err = setNumberParam(param, attrKey, schemaType, attr, commentLine) + case defaultTag: + err = setDefault(param, schemaType, attr) + case minLengthTag, maxLengthTag: + err = setStringParam(param, attrKey, schemaType, attr, commentLine) + case formatTag: param.Format = attr - case "collectionFormat": - n, err := setCollectionFormatParam(attrKey, objectType, attr, commentLine) - if err != nil { - return err - } - param.CollectionFormat = n + case extensionsTag: + _ = setExtensionParam(param, attr) + case collectionFormatTag: + err = setCollectionFormatParam(param, attrKey, objectType, attr, commentLine) + } + if err != nil { + return err } } + return nil } @@ -413,32 +419,50 @@ func findAttr(re *regexp.Regexp, commentLine string) (string, error) { if l == -1 || r == -1 { return "", fmt.Errorf("can not find regex=%s, comment=%s", re.String(), commentLine) } + return strings.TrimSpace(attr[l+1 : r]), nil } -func setStringParam(name, schemaType, attr, commentLine string) (int64, error) { - if schemaType != "string" { - return 0, fmt.Errorf("%s is attribute to set to a number. 
comment=%s got=%s", name, commentLine, schemaType) +func setStringParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + if schemaType != STRING { + return fmt.Errorf("%s is attribute to set to a number. comment=%s got=%s", name, commentLine, schemaType) } + n, err := strconv.ParseInt(attr, 10, 64) if err != nil { - return 0, fmt.Errorf("%s is allow only a number got=%s", name, attr) + return fmt.Errorf("%s is allow only a number got=%s", name, attr) } - return n, nil -} -func setNumberParam(name, schemaType, attr, commentLine string) (float64, error) { - if schemaType != "integer" && schemaType != "number" { - return 0, fmt.Errorf("%s is attribute to set to a number. comment=%s got=%s", name, commentLine, schemaType) + switch name { + case minLengthTag: + param.MinLength = &n + case maxLengthTag: + param.MaxLength = &n } - n, err := strconv.ParseFloat(attr, 64) - if err != nil { - return 0, fmt.Errorf("maximum is allow only a number. comment=%s got=%s", commentLine, attr) + + return nil +} + +func setNumberParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + switch schemaType { + case INTEGER, NUMBER: + n, err := strconv.ParseFloat(attr, 64) + if err != nil { + return fmt.Errorf("maximum is allow only a number. comment=%s got=%s", commentLine, attr) + } + switch name { + case minimumTag: + param.Minimum = &n + case maximumTag: + param.Maximum = &n + } + return nil + default: + return fmt.Errorf("%s is attribute to set to a number. comment=%s got=%s", name, commentLine, schemaType) } - return n, nil } -func setEnumParam(attr, schemaType string, param *spec.Parameter) error { +func setEnumParam(param *spec.Parameter, attr, objectType, schemaType string) error { for _, e := range strings.Split(attr, ",") { e = strings.TrimSpace(e) @@ -446,45 +470,76 @@ func setEnumParam(attr, schemaType string, param *spec.Parameter) error { if err != nil { return err } - param.Enum = append(param.Enum, value) + + switch objectType { + case ARRAY: + param.Items.Enum = append(param.Items.Enum, value) + default: + param.Enum = append(param.Enum, value) + } } + return nil } -func setCollectionFormatParam(name, schemaType, attr, commentLine string) (string, error) { - if schemaType != "array" { - return "", fmt.Errorf("%s is attribute to set to an array. comment=%s got=%s", name, commentLine, schemaType) +func setExtensionParam(param *spec.Parameter, attr string) error { + param.Extensions = map[string]interface{}{} + for _, val := range strings.Split(attr, ",") { + parts := strings.SplitN(val, "=", 2) + if len(parts) == 2 { + param.Extensions.Add(parts[0], parts[1]) + + continue + } + param.Extensions.Add(parts[0], true) + } + return nil +} + +func setCollectionFormatParam(param *spec.Parameter, name, schemaType, attr, commentLine string) error { + if schemaType == ARRAY { + param.CollectionFormat = TransToValidCollectionFormat(attr) + return nil + } + + return fmt.Errorf("%s is attribute to set to an array. 
comment=%s got=%s", name, commentLine, schemaType) +} + +func setDefault(param *spec.Parameter, schemaType string, value string) error { + val, err := defineType(schemaType, value) + if err != nil { + return nil // Don't set a default value if it's not valid } - return TransToValidCollectionFormat(attr), nil + param.Default = val + return nil } -// defineType enum value define the type (object and array unsupported) -func defineType(schemaType string, value string) (interface{}, error) { +// defineType enum value define the type (object and array unsupported). +func defineType(schemaType string, value string) (v interface{}, err error) { schemaType = TransToValidSchemeType(schemaType) switch schemaType { - case "string": + case STRING: return value, nil - case "number": - v, err := strconv.ParseFloat(value, 64) + case NUMBER: + v, err = strconv.ParseFloat(value, 64) if err != nil { return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) } - return v, nil - case "integer": - v, err := strconv.Atoi(value) + case INTEGER: + v, err = strconv.Atoi(value) if err != nil { return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) } - return v, nil - case "boolean": - v, err := strconv.ParseBool(value) + case BOOLEAN: + v, err = strconv.ParseBool(value) if err != nil { return nil, fmt.Errorf("enum value %s can't convert to %s err: %s", value, schemaType, err) } - return v, nil default: - return nil, fmt.Errorf("%s is unsupported type in enum value", schemaType) + return nil, fmt.Errorf("%s is unsupported type in enum value %s", schemaType, value) } + + return v, nil } // ParseTagsComment parses comment for given `tag` comment string. @@ -507,42 +562,49 @@ func (operation *Operation) ParseProduceComment(commentLine string) error { // parseMimeTypeList parses a list of MIME Types for a comment like // `produce` (`Content-Type:` response header) or -// `accept` (`Accept:` request header) +// `accept` (`Accept:` request header). func parseMimeTypeList(mimeTypeList string, typeList *[]string, format string) error { mimeTypes := strings.Split(mimeTypeList, ",") for _, typeName := range mimeTypes { if mimeTypePattern.MatchString(typeName) { *typeList = append(*typeList, typeName) + continue } - if aliasMimeType, ok := mimeTypeAliases[typeName]; ok { - *typeList = append(*typeList, aliasMimeType) - continue + aliasMimeType, ok := mimeTypeAliases[typeName] + if !ok { + return fmt.Errorf(format, typeName) } - return fmt.Errorf(format, typeName) + + *typeList = append(*typeList, aliasMimeType) } + return nil } -var routerPattern = regexp.MustCompile(`^(/[\w\.\/\-{}\+:]*)[[:blank:]]+\[(\w+)]`) +var routerPattern = regexp.MustCompile(`^(/[\w./\-{}+:$]*)[[:blank:]]+\[(\w+)]`) -// ParseRouterComment parses comment for gived `router` comment string. +// ParseRouterComment parses comment for given `router` comment string. 
func (operation *Operation) ParseRouterComment(commentLine string) error { - var matches []string - - if matches = routerPattern.FindStringSubmatch(commentLine); len(matches) != 3 { + matches := routerPattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { return fmt.Errorf("can not parse router comment \"%s\"", commentLine) } - path := matches[1] - httpMethod := matches[2] + signature := RouteProperties{ + Path: matches[1], + HTTPMethod: strings.ToUpper(matches[2]), + } - operation.Path = path - operation.HTTPMethod = strings.ToUpper(httpMethod) + if _, ok := allMethod[signature.HTTPMethod]; !ok { + return fmt.Errorf("invalid method: %s", signature.HTTPMethod) + } + + operation.RouterProperties = append(operation.RouterProperties, signature) return nil } -// ParseSecurityComment parses comment for gived `security` comment string. +// ParseSecurityComment parses comment for given `security` comment string. func (operation *Operation) ParseSecurityComment(commentLine string) error { securitySource := commentLine[strings.Index(commentLine, "@Security")+1:] l := strings.Index(securitySource, "[") @@ -550,10 +612,9 @@ func (operation *Operation) ParseSecurityComment(commentLine string) error { // exists scope if !(l == -1 && r == -1) { scopes := securitySource[l+1 : r] - s := []string{} + var s []string for _, scope := range strings.Split(scopes, ",") { - scope = strings.TrimSpace(scope) - s = append(s, scope) + s = append(s, strings.TrimSpace(scope)) } securityKey := securitySource[0:l] securityMap := map[string][]string{} @@ -565,11 +626,12 @@ func (operation *Operation) ParseSecurityComment(commentLine string) error { securityMap[securityKey] = []string{} operation.Security = append(operation.Security, securityMap) } + return nil } // findTypeDef attempts to find the *ast.TypeSpec for a specific type given the -// type's name and the package's import path +// type's name and the package's import path. // TODO: improve finding external pkg func findTypeDef(importPath, typeName string) (*ast.TypeSpec, error) { cwd, err := os.Getwd() @@ -609,9 +671,11 @@ func findTypeDef(importPath, typeName string) (*ast.TypeSpec, error) { for i := range pkgInfo.Files { for _, astDeclaration := range pkgInfo.Files[i].Decls { - if generalDeclaration, ok := astDeclaration.(*ast.GenDecl); ok && generalDeclaration.Tok == token.TYPE { + generalDeclaration, ok := astDeclaration.(*ast.GenDecl) + if ok && generalDeclaration.Tok == token.TYPE { for _, astSpec := range generalDeclaration.Specs { - if typeSpec, ok := astSpec.(*ast.TypeSpec); ok { + typeSpec, ok := astSpec.(*ast.TypeSpec) + if ok { if typeSpec.Name.String() == typeName { return typeSpec, nil } @@ -620,72 +684,67 @@ func findTypeDef(importPath, typeName string) (*ast.TypeSpec, error) { } } } + return nil, fmt.Errorf("type spec not found") } -var responsePattern = regexp.MustCompile(`([\d]+)[\s]+([\w\{\}]+)[\s]+([\w\-\.\/\{\}=,\[\]]+)[^"]*(.*)?`) +var responsePattern = regexp.MustCompile(`^([\w,]+)[\s]+([\w{}]+)[\s]+([\w\-.\\{}=,\[\]]+)[^"]*(.*)?`) -//RepsonseType{data1=Type1,data2=Type2} -var combinedPattern = regexp.MustCompile(`^([\w\-\.\/\[\]]+)\{(.*)\}$`) +// ResponseType{data1=Type1,data2=Type2}. 
+var combinedPattern = regexp.MustCompile(`^([\w\-./\[\]]+){(.*)}$`) func (operation *Operation) parseObjectSchema(refType string, astFile *ast.File) (*spec.Schema, error) { switch { + case refType == NIL: + return nil, nil case refType == "interface{}": - return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{"object"}}}, nil + return PrimitiveSchema(OBJECT), nil case IsGolangPrimitiveType(refType): refType = TransToValidSchemeType(refType) - return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{refType}}}, nil + + return PrimitiveSchema(refType), nil case IsPrimitiveType(refType): - return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{refType}}}, nil + return PrimitiveSchema(refType), nil case strings.HasPrefix(refType, "[]"): schema, err := operation.parseObjectSchema(refType[2:], astFile) if err != nil { return nil, err } - return &spec.Schema{SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{Schema: schema}}, - }, nil + + return spec.ArrayProperty(schema), nil case strings.HasPrefix(refType, "map["): - //ignore key type + // ignore key type idx := strings.Index(refType, "]") if idx < 0 { return nil, fmt.Errorf("invalid type: %s", refType) } refType = refType[idx+1:] - var valueSchema spec.SchemaOrBool if refType == "interface{}" { - valueSchema.Allows = true - } else { - schema, err := operation.parseObjectSchema(refType, astFile) - if err != nil { - return &spec.Schema{}, err - } - valueSchema.Schema = schema + return spec.MapProperty(nil), nil } - return &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &valueSchema, - }, - }, nil + schema, err := operation.parseObjectSchema(refType, astFile) + if err != nil { + return nil, err + } + + return spec.MapProperty(schema), nil case strings.Contains(refType, "{"): - return operation.parseResponseCombinedObjectSchema(refType, astFile) + return operation.parseCombinedObjectSchema(refType, astFile) default: if operation.parser != nil { // checking refType has existing in 'TypeDefinitions' - refNewType, typeSpec, err := operation.registerSchemaType(refType, astFile) + schema, err := operation.parser.getTypeSchema(refType, astFile, true) if err != nil { return nil, err } - refType = TypeDocName(refNewType, typeSpec) + + return schema, nil } - return &spec.Schema{SchemaProps: spec.SchemaProps{Ref: spec.Ref{ - Ref: jsonreference.MustCreateRef("#/definitions/" + refType), - }}}, nil + + return RefSchema(refType), nil } } -func (operation *Operation) parseResponseCombinedObjectSchema(refType string, astFile *ast.File) (*spec.Schema, error) { +func (operation *Operation) parseCombinedObjectSchema(refType string, astFile *ast.File) (*spec.Schema, error) { matches := combinedPattern.FindStringSubmatch(refType) if len(matches) != 3 { return nil, fmt.Errorf("invalid type: %s", refType) @@ -698,14 +757,18 @@ func (operation *Operation) parseResponseCombinedObjectSchema(refType string, as parseFields := func(s string) []string { n := 0 + return strings.FieldsFunc(s, func(r rune) bool { if r == '{' { n++ + return false } else if r == '}' { n-- + return false } + return r == ',' && n == 0 }) } @@ -713,189 +776,223 @@ func (operation *Operation) parseResponseCombinedObjectSchema(refType string, as fields := parseFields(matches[2]) props := map[string]spec.Schema{} for _, field := range fields { - if matches := strings.SplitN(field, "=", 2); len(matches) == 2 { - if strings.HasPrefix(matches[1], "[]") { - itemSchema, err := 
operation.parseObjectSchema(matches[1][2:], astFile) - if err != nil { - return nil, err - } - props[matches[0]] = spec.Schema{SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{Schema: itemSchema}}, - } - } else { - schema, err := operation.parseObjectSchema(matches[1], astFile) - if err != nil { - return nil, err - } - props[matches[0]] = *schema + matches := strings.SplitN(field, "=", 2) + if len(matches) == 2 { + schema, err := operation.parseObjectSchema(matches[1], astFile) + if err != nil { + return nil, err } + props[matches[0]] = *schema } } if len(props) == 0 { return schema, nil } - return &spec.Schema{ + + return spec.ComposedSchema(*schema, spec.Schema{ SchemaProps: spec.SchemaProps{ - AllOf: []spec.Schema{ - *schema, - { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: props, - }, - }, - }, + Type: []string{OBJECT}, + Properties: props, }, - }, nil + }), nil } -func (operation *Operation) parseResponseSchema(schemaType, refType string, astFile *ast.File) (*spec.Schema, error) { +func (operation *Operation) parseAPIObjectSchema(schemaType, refType string, astFile *ast.File) (*spec.Schema, error) { switch schemaType { - case "object": + case OBJECT: if !strings.HasPrefix(refType, "[]") { return operation.parseObjectSchema(refType, astFile) } refType = refType[2:] + fallthrough - case "array": + case ARRAY: schema, err := operation.parseObjectSchema(refType, astFile) if err != nil { return nil, err } - return &spec.Schema{SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{Schema: schema}}, - }, nil + + return spec.ArrayProperty(schema), nil default: - return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{schemaType}}}, nil + return PrimitiveSchema(schemaType), nil } } // ParseResponseComment parses comment for given `response` comment string. 
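parseCombinedObjectSchema (above) now composes the base model with inline properties via `spec.ComposedSchema`, and ParseResponseComment (below) accepts comma-separated status codes plus the `default` keyword. An illustrative handler comment, assuming placeholder types `web.GenericResponse`, `model.User` and `model.Tag`:

```go
package api

import "net/http"

// GetUser godoc
// @Summary  Get a user
// @Produce  json
// @Success  200,201  {object}  web.GenericResponse{data=model.User,tags=[]model.Tag}
// @Failure  default  {object}  web.GenericResponse
// @Router   /users/{id} [get]
func GetUser(w http.ResponseWriter, r *http.Request) {}
```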
func (operation *Operation) ParseResponseComment(commentLine string, astFile *ast.File) error { - var matches []string - - if matches = responsePattern.FindStringSubmatch(commentLine); len(matches) != 5 { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { err := operation.ParseEmptyResponseComment(commentLine) if err != nil { return operation.ParseEmptyResponseOnly(commentLine) } + return err } - code, _ := strconv.Atoi(matches[1]) - responseDescription := strings.Trim(matches[4], "\"") - if responseDescription == "" { - responseDescription = http.StatusText(code) - } - - schemaType := strings.Trim(matches[2], "{}") - refType := matches[3] - schema, err := operation.parseResponseSchema(schemaType, refType, astFile) + schema, err := operation.parseAPIObjectSchema(strings.Trim(matches[2], "{}"), matches[3], astFile) if err != nil { return err } - if operation.Responses == nil { - operation.Responses = &spec.Responses{ - ResponsesProps: spec.ResponsesProps{ - StatusCodeResponses: make(map[int]spec.Response), - }, + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + operation.DefaultResponse().Schema = schema + operation.DefaultResponse().Description = responseDescription + + continue } + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + resp := &spec.Response{ + ResponseProps: spec.ResponseProps{Schema: schema, Description: responseDescription}, + } + if resp.Description == "" { + resp.Description = http.StatusText(code) + } + operation.AddResponse(code, resp) } - operation.Responses.StatusCodeResponses[code] = spec.Response{ - ResponseProps: spec.ResponseProps{Schema: schema, Description: responseDescription}, - } return nil } -// ParseResponseHeaderComment parses comment for gived `response header` comment string. -func (operation *Operation) ParseResponseHeaderComment(commentLine string, astFile *ast.File) error { - var matches []string +func newHeaderSpec(schemaType, description string) spec.Header { + return spec.Header{ + SimpleSchema: spec.SimpleSchema{ + Type: schemaType, + }, + HeaderProps: spec.HeaderProps{ + Description: description, + }, + } +} - if matches = responsePattern.FindStringSubmatch(commentLine); len(matches) != 5 { +// ParseResponseHeaderComment parses comment for given `response header` comment string. 
+func (operation *Operation) ParseResponseHeaderComment(commentLine string, _ *ast.File) error { + matches := responsePattern.FindStringSubmatch(commentLine) + if len(matches) != 5 { return fmt.Errorf("can not parse response comment \"%s\"", commentLine) } - response := spec.Response{} + header := newHeaderSpec(strings.Trim(matches[2], "{}"), strings.Trim(matches[4], "\"")) - code, _ := strconv.Atoi(matches[1]) - - responseDescription := strings.Trim(matches[4], "\"") - if responseDescription == "" { - responseDescription = http.StatusText(code) + headerKey := matches[3] + if operation.Responses.Default != nil { + if operation.Responses.Default.Headers == nil { + operation.Responses.Default.Headers = make(map[string]spec.Header) + } } - response.Description = responseDescription - schemaType := strings.Trim(matches[2], "{}") - refType := matches[3] + if strings.EqualFold(matches[1], "all") { + if operation.Responses.Default != nil { + operation.Responses.Default.Headers[headerKey] = header + } - if operation.Responses == nil { - operation.Responses = &spec.Responses{ - ResponsesProps: spec.ResponsesProps{ - StatusCodeResponses: make(map[int]spec.Response), - }, + if operation.Responses.StatusCodeResponses != nil { + for code, response := range operation.Responses.StatusCodeResponses { + response.Headers[headerKey] = header + operation.Responses.StatusCodeResponses[code] = response + } } + + return nil } - response, responseExist := operation.Responses.StatusCodeResponses[code] - if responseExist { - header := spec.Header{} - header.Description = responseDescription - header.Type = schemaType + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + if operation.Responses.Default != nil { + operation.Responses.Default.Headers[headerKey] = header + } + + continue + } - if response.Headers == nil { - response.Headers = make(map[string]spec.Header) + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) } - response.Headers[refType] = header + if operation.Responses.StatusCodeResponses != nil { + response, responseExist := operation.Responses.StatusCodeResponses[code] + if responseExist { + if response.Headers == nil { + response.Headers = make(map[string]spec.Header) + } + response.Headers[headerKey] = header - operation.Responses.StatusCodeResponses[code] = response + operation.Responses.StatusCodeResponses[code] = response + } + } } return nil } -var emptyResponsePattern = regexp.MustCompile(`([\d]+)[\s]+"(.*)"`) +var emptyResponsePattern = regexp.MustCompile(`([\w,]+)[\s]+"(.*)"`) -// ParseEmptyResponseComment parse only comment out status code and description,eg: @Success 200 "it's ok" +// ParseEmptyResponseComment parse only comment out status code and description,eg: @Success 200 "it's ok". 
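Both the header parser above and the empty-response parsers below now accept comma-separated status codes and the `default` keyword; `all` fans a header out to every registered response. A hedged sketch of the comment forms involved (header names, descriptions and `model.*` types are made up):

```go
package api

import "net/http"

// CreateUser godoc
// @Summary  Create a user
// @Success  200,201  {object}  model.User
// @Header   200,201  {string}  X-Request-Id           "request id echoed back"
// @Header   all      {string}  X-RateLimit-Remaining  "remaining quota"
// @Failure  default  {object}  model.Error
// @Router   /users [post]
func CreateUser(w http.ResponseWriter, r *http.Request) {}
```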
func (operation *Operation) ParseEmptyResponseComment(commentLine string) error { - var matches []string - - if matches = emptyResponsePattern.FindStringSubmatch(commentLine); len(matches) != 3 { + matches := emptyResponsePattern.FindStringSubmatch(commentLine) + if len(matches) != 3 { return fmt.Errorf("can not parse response comment \"%s\"", commentLine) } - response := spec.Response{} + responseDescription := strings.Trim(matches[2], "\"") + for _, codeStr := range strings.Split(matches[1], ",") { + if strings.EqualFold(codeStr, defaultTag) { + operation.DefaultResponse().Description = responseDescription - code, _ := strconv.Atoi(matches[1]) - - response.Description = strings.Trim(matches[2], "") + continue + } - if operation.Responses == nil { - operation.Responses = &spec.Responses{ - ResponsesProps: spec.ResponsesProps{ - StatusCodeResponses: make(map[int]spec.Response), - }, + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) } - } - operation.Responses.StatusCodeResponses[code] = response + var response spec.Response + response.Description = responseDescription + operation.AddResponse(code, &response) + } return nil } -//ParseEmptyResponseOnly parse only comment out status code ,eg: @Success 200 +// ParseEmptyResponseOnly parse only comment out status code ,eg: @Success 200. func (operation *Operation) ParseEmptyResponseOnly(commentLine string) error { - response := spec.Response{} + for _, codeStr := range strings.Split(commentLine, ",") { + if strings.EqualFold(codeStr, defaultTag) { + _ = operation.DefaultResponse() - code, err := strconv.Atoi(commentLine) - if err != nil { - return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + continue + } + code, err := strconv.Atoi(codeStr) + if err != nil { + return fmt.Errorf("can not parse response comment \"%s\"", commentLine) + } + + var response spec.Response + // response.Description = http.StatusText(code) + operation.AddResponse(code, &response) } + + return nil +} + +// DefaultResponse return the default response member pointer. +func (operation *Operation) DefaultResponse() *spec.Response { + if operation.Responses.Default == nil { + operation.Responses.Default = &spec.Response{} + } + + return operation.Responses.Default +} + +// AddResponse add a response for a code. +func (operation *Operation) AddResponse(code int, response *spec.Response) { if operation.Responses == nil { operation.Responses = &spec.Responses{ ResponsesProps: spec.ResponsesProps{ @@ -904,36 +1001,64 @@ func (operation *Operation) ParseEmptyResponseOnly(commentLine string) error { } } - operation.Responses.StatusCodeResponses[code] = response - - return nil + operation.Responses.StatusCodeResponses[code] = *response } -// createParameter returns swagger spec.Parameter for gived paramType, description, paramName, schemaType, required +// createParameter returns swagger spec.Parameter for given paramType, description, paramName, schemaType, required. func createParameter(paramType, description, paramName, schemaType string, required bool) spec.Parameter { // //five possible parameter types. 
query, path, body, header, form - paramProps := spec.ParamProps{ - Name: paramName, - Description: description, - Required: required, - In: paramType, + result := spec.Parameter{ + ParamProps: spec.ParamProps{ + Name: paramName, + Description: description, + Required: required, + In: paramType, + }, } + if paramType == "body" { - paramProps.Schema = &spec.Schema{ + result.ParamProps.Schema = &spec.Schema{ SchemaProps: spec.SchemaProps{ Type: []string{schemaType}, }, } - parameter := spec.Parameter{ - ParamProps: paramProps, - } - return parameter + + return result } - parameter := spec.Parameter{ - ParamProps: paramProps, - SimpleSchema: spec.SimpleSchema{ - Type: schemaType, - }, + + result.SimpleSchema = spec.SimpleSchema{ + Type: schemaType, } - return parameter + + return result +} + +func getCodeExampleForSummary(summaryName string, dirPath string) ([]byte, error) { + filesInfos, err := ioutil.ReadDir(dirPath) + if err != nil { + return nil, err + } + + for _, fileInfo := range filesInfos { + if fileInfo.IsDir() { + continue + } + fileName := fileInfo.Name() + + if !strings.Contains(fileName, ".json") { + continue + } + + if strings.Contains(fileName, summaryName) { + fullPath := filepath.Join(dirPath, fileName) + commentInfo, err := ioutil.ReadFile(fullPath) + if err != nil { + return nil, fmt.Errorf("Failed to read code example file %s error: %s ", fullPath, err) + } + + return commentInfo, nil + } + } + + return nil, fmt.Errorf("unable to find code example file for tag %s in the given directory", summaryName) } diff --git a/vendor/github.com/swaggo/swag/packages.go b/vendor/github.com/swaggo/swag/packages.go new file mode 100644 index 00000000..e0add5d6 --- /dev/null +++ b/vendor/github.com/swaggo/swag/packages.go @@ -0,0 +1,336 @@ +package swag + +import ( + "go/ast" + goparser "go/parser" + "go/token" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/loader" +) + +// PackagesDefinitions map[package import path]*PackageDefinitions. +type PackagesDefinitions struct { + files map[*ast.File]*AstFileInfo + packages map[string]*PackageDefinitions + uniqueDefinitions map[string]*TypeSpecDef +} + +// NewPackagesDefinitions create object PackagesDefinitions. +func NewPackagesDefinitions() *PackagesDefinitions { + return &PackagesDefinitions{ + files: make(map[*ast.File]*AstFileInfo), + packages: make(map[string]*PackageDefinitions), + uniqueDefinitions: make(map[string]*TypeSpecDef), + } +} + +// CollectAstFile collect ast.file. +func (pkgs *PackagesDefinitions) CollectAstFile(packageDir, path string, astFile *ast.File) error { + if pkgs.files == nil { + pkgs.files = make(map[*ast.File]*AstFileInfo) + } + + if pkgs.packages == nil { + pkgs.packages = make(map[string]*PackageDefinitions) + } + + // return without storing the file if we lack a packageDir + if packageDir == "" { + return nil + } + + path, err := filepath.Abs(path) + if err != nil { + return err + } + + pd, ok := pkgs.packages[packageDir] + if ok { + // return without storing the file if it already exists + _, exists := pd.Files[path] + if exists { + return nil + } + pd.Files[path] = astFile + } else { + pkgs.packages[packageDir] = &PackageDefinitions{ + Name: astFile.Name.Name, + Files: map[string]*ast.File{path: astFile}, + TypeDefinitions: make(map[string]*TypeSpecDef), + } + } + + pkgs.files[astFile] = &AstFileInfo{ + File: astFile, + Path: path, + PackagePath: packageDir, + } + + return nil +} + +// RangeFiles for range the collection of ast.File in alphabetic order. 
+func (pkgs *PackagesDefinitions) RangeFiles(handle func(filename string, file *ast.File) error) error { + sortedFiles := make([]*AstFileInfo, 0, len(pkgs.files)) + for _, info := range pkgs.files { + sortedFiles = append(sortedFiles, info) + } + + sort.Slice(sortedFiles, func(i, j int) bool { + return strings.Compare(sortedFiles[i].Path, sortedFiles[j].Path) < 0 + }) + + for _, info := range sortedFiles { + err := handle(info.Path, info.File) + if err != nil { + return err + } + } + + return nil +} + +// ParseTypes parse types +// @Return parsed definitions. +func (pkgs *PackagesDefinitions) ParseTypes() (map[*TypeSpecDef]*Schema, error) { + parsedSchemas := make(map[*TypeSpecDef]*Schema) + for astFile, info := range pkgs.files { + pkgs.parseTypesFromFile(astFile, info.PackagePath, parsedSchemas) + } + return parsedSchemas, nil +} + +func (pkgs *PackagesDefinitions) parseTypesFromFile(astFile *ast.File, packagePath string, parsedSchemas map[*TypeSpecDef]*Schema) { + for _, astDeclaration := range astFile.Decls { + if generalDeclaration, ok := astDeclaration.(*ast.GenDecl); ok && generalDeclaration.Tok == token.TYPE { + for _, astSpec := range generalDeclaration.Specs { + if typeSpec, ok := astSpec.(*ast.TypeSpec); ok { + typeSpecDef := &TypeSpecDef{ + PkgPath: packagePath, + File: astFile, + TypeSpec: typeSpec, + } + + if idt, ok := typeSpec.Type.(*ast.Ident); ok && IsGolangPrimitiveType(idt.Name) && parsedSchemas != nil { + parsedSchemas[typeSpecDef] = &Schema{ + PkgPath: typeSpecDef.PkgPath, + Name: astFile.Name.Name, + Schema: PrimitiveSchema(TransToValidSchemeType(idt.Name)), + } + } + + if pkgs.uniqueDefinitions == nil { + pkgs.uniqueDefinitions = make(map[string]*TypeSpecDef) + } + + fullName := typeSpecDef.FullName() + anotherTypeDef, ok := pkgs.uniqueDefinitions[fullName] + if ok { + if typeSpecDef.PkgPath == anotherTypeDef.PkgPath { + continue + } else { + delete(pkgs.uniqueDefinitions, fullName) + } + } else { + pkgs.uniqueDefinitions[fullName] = typeSpecDef + } + + if pkgs.packages[typeSpecDef.PkgPath] == nil { + pkgs.packages[typeSpecDef.PkgPath] = &PackageDefinitions{ + Name: astFile.Name.Name, + TypeDefinitions: map[string]*TypeSpecDef{typeSpecDef.Name(): typeSpecDef}, + } + } else if _, ok = pkgs.packages[typeSpecDef.PkgPath].TypeDefinitions[typeSpecDef.Name()]; !ok { + pkgs.packages[typeSpecDef.PkgPath].TypeDefinitions[typeSpecDef.Name()] = typeSpecDef + } + } + } + } + } +} + +func (pkgs *PackagesDefinitions) findTypeSpec(pkgPath string, typeName string) *TypeSpecDef { + if pkgs.packages == nil { + return nil + } + pd, found := pkgs.packages[pkgPath] + if found { + typeSpec, ok := pd.TypeDefinitions[typeName] + if ok { + return typeSpec + } + } + + return nil +} + +func (pkgs *PackagesDefinitions) loadExternalPackage(importPath string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + conf := loader.Config{ + ParserMode: goparser.ParseComments, + Cwd: cwd, + } + + conf.Import(importPath) + + loaderProgram, err := conf.Load() + if err != nil { + return err + } + + for _, info := range loaderProgram.AllPackages { + pkgPath := strings.TrimPrefix(info.Pkg.Path(), "vendor/") + for _, astFile := range info.Files { + pkgs.parseTypesFromFile(astFile, pkgPath, nil) + } + } + + return nil +} + +// findPackagePathFromImports finds out the package path of a package via ranging imports of an ast.File +// @pkg the name of the target package +// @file current ast.File in which to search imports +// @fuzzy search for the package path that the last part matches the 
@pkg if true +// @return the package path of a package of @pkg. +func (pkgs *PackagesDefinitions) findPackagePathFromImports(pkg string, file *ast.File, fuzzy bool) string { + if file == nil { + return "" + } + + if strings.ContainsRune(pkg, '.') { + pkg = strings.Split(pkg, ".")[0] + } + + hasAnonymousPkg := false + + matchLastPathPart := func(pkgPath string) bool { + paths := strings.Split(pkgPath, "/") + return paths[len(paths)-1] == pkg + } + + // prior to match named package + for _, imp := range file.Imports { + if imp.Name != nil { + if imp.Name.Name == pkg { + return strings.Trim(imp.Path.Value, `"`) + } + if imp.Name.Name == "_" { + hasAnonymousPkg = true + } + + continue + } + if pkgs.packages != nil { + path := strings.Trim(imp.Path.Value, `"`) + if fuzzy { + if matchLastPathPart(path) { + return path + } + } else if pd, ok := pkgs.packages[path]; ok && pd.Name == pkg { + return path + } + } + } + + // match unnamed package + if hasAnonymousPkg && pkgs.packages != nil { + for _, imp := range file.Imports { + if imp.Name == nil { + continue + } + if imp.Name.Name == "_" { + path := strings.Trim(imp.Path.Value, `"`) + if fuzzy { + if matchLastPathPart(path) { + return path + } + } else if pd, ok := pkgs.packages[path]; ok && pd.Name == pkg { + return path + } + } + } + } + + return "" +} + +// FindTypeSpec finds out TypeSpecDef of a type by typeName +// @typeName the name of the target type, if it starts with a package name, find its own package path from imports on top of @file +// @file the ast.file in which @typeName is used +// @pkgPath the package path of @file. +func (pkgs *PackagesDefinitions) FindTypeSpec(typeName string, file *ast.File, parseDependency bool) *TypeSpecDef { + if IsGolangPrimitiveType(typeName) { + return nil + } + if file == nil { // for test + return pkgs.uniqueDefinitions[typeName] + } + + parts := strings.Split(typeName, ".") + if len(parts) > 1 { + isAliasPkgName := func(file *ast.File, pkgName string) bool { + if file != nil && file.Imports != nil { + for _, pkg := range file.Imports { + if pkg.Name != nil && pkg.Name.Name == pkgName { + return true + } + } + } + + return false + } + + if !isAliasPkgName(file, parts[0]) { + typeDef, ok := pkgs.uniqueDefinitions[typeName] + if ok { + return typeDef + } + } + pkgPath := pkgs.findPackagePathFromImports(parts[0], file, false) + if len(pkgPath) == 0 { + // check if the current package + if parts[0] == file.Name.Name { + pkgPath = pkgs.files[file].PackagePath + } else if parseDependency { + // take it as an external package, needs to be loaded + if pkgPath = pkgs.findPackagePathFromImports(parts[0], file, true); len(pkgPath) > 0 { + if err := pkgs.loadExternalPackage(pkgPath); err != nil { + return nil + } + } + } + } + + return pkgs.findTypeSpec(pkgPath, parts[1]) + } + + typeDef, ok := pkgs.uniqueDefinitions[fullTypeName(file.Name.Name, typeName)] + if ok { + return typeDef + } + + typeDef = pkgs.findTypeSpec(pkgs.files[file].PackagePath, typeName) + if typeDef != nil { + return typeDef + } + + for _, imp := range file.Imports { + if imp.Name != nil && imp.Name.Name == "." 
{ + typeDef := pkgs.findTypeSpec(strings.Trim(imp.Path.Value, `"`), typeName) + if typeDef != nil { + return typeDef + } + } + } + + return nil +} diff --git a/vendor/github.com/swaggo/swag/parser.go b/vendor/github.com/swaggo/swag/parser.go index b4871f43..411e13c5 100644 --- a/vendor/github.com/swaggo/swag/parser.go +++ b/vendor/github.com/swaggo/swag/parser.go @@ -2,13 +2,16 @@ package swag import ( "encoding/json" + "errors" "fmt" "go/ast" "go/build" goparser "go/parser" "go/token" "io/ioutil" + "log" "net/http" + "net/url" "os" "os/exec" "path/filepath" @@ -16,10 +19,8 @@ import ( "sort" "strconv" "strings" - "unicode" "github.com/KyleBanks/depth" - "github.com/go-openapi/jsonreference" "github.com/go-openapi/spec" ) @@ -32,30 +33,61 @@ const ( // SnakeCase indicates using SnakeCase strategy for struct field. SnakeCase = "snakecase" + + acceptAttr = "@accept" + produceAttr = "@produce" + xCodeSamplesAttr = "@x-codesamples" + scopeAttrPrefix = "@scope." +) + +var ( + // ErrRecursiveParseStruct recursively parsing struct. + ErrRecursiveParseStruct = errors.New("recursively parsing struct") + + // ErrFuncTypeField field type is func. + ErrFuncTypeField = errors.New("field type is func") + + // ErrFailedConvertPrimitiveType Failed to convert for swag to interpretable type. + ErrFailedConvertPrimitiveType = errors.New("swag property: failed convert primitive type") ) +var allMethod = map[string]struct{}{ + http.MethodGet: {}, + http.MethodPut: {}, + http.MethodPost: {}, + http.MethodDelete: {}, + http.MethodOptions: {}, + http.MethodHead: {}, + http.MethodPatch: {}, +} + // Parser implements a parser for Go source files. type Parser struct { // swagger represents the root document object for the API specification swagger *spec.Swagger - // files is a map that stores map[real_go_file_path][astFile] - files map[string]*ast.File + // packages store entities of APIs, definitions, file, package path etc. 
and their relations + packages *PackagesDefinitions + + // parsedSchemas store schemas which have been parsed from ast.TypeSpec + parsedSchemas map[*TypeSpecDef]*Schema - // TypeDefinitions is a map that stores [package name][type name][*ast.TypeSpec] - TypeDefinitions map[string]map[string]*ast.TypeSpec + // outputSchemas store schemas which will be export to swagger + outputSchemas map[*TypeSpecDef]*Schema - // ImportAliases is map that stores [import name][import package name][*ast.ImportSpec] - ImportAliases map[string]map[string]*ast.ImportSpec + // existSchemaNames store names of models for conflict determination + existSchemaNames map[string]*Schema - // CustomPrimitiveTypes is a map that stores custom primitive types to actual golang types [type name][string] - CustomPrimitiveTypes map[string]string + // toBeRenamedSchemas names of models to be renamed + toBeRenamedSchemas map[string]string - // registerTypes is a map that stores [refTypeName][*ast.TypeSpec] - registerTypes map[string]*ast.TypeSpec + // toBeRenamedSchemas URLs of ref models to be renamed + toBeRenamedRefURLs []*url.URL + // PropNamingStrategy naming strategy PropNamingStrategy string + // ParseVendor parse vendor folder ParseVendor bool // ParseDependencies whether swag should be parse outside dependency folder @@ -64,42 +96,79 @@ type Parser struct { // ParseInternal whether swag should parse internal packages ParseInternal bool + // Strict whether swag should error or warn when it detects cases which are most likely user errors + Strict bool + // structStack stores full names of the structures that were already parsed or are being parsed now - structStack []string + structStack []*TypeSpecDef // markdownFileDir holds the path to the folder, where markdown files are stored markdownFileDir string + // codeExampleFilesDir holds path to the folder, where code example files are stored + codeExampleFilesDir string + // collectionFormatInQuery set the default collectionFormat otherwise then 'csv' for array in query params collectionFormatInQuery string // excludes excludes dirs and files in SearchDir excludes map[string]bool + + // debugging output goes here + debug Debugger + + // fieldParserFactory create FieldParser + fieldParserFactory FieldParserFactory +} + +// FieldParserFactory create FieldParser +type FieldParserFactory func(ps *Parser, field *ast.Field) FieldParser + +// FieldParser parse struct field +type FieldParser interface { + ShouldSkip() (bool, error) + FieldName() (string, error) + CustomSchema() (*spec.Schema, error) + ComplementSchema(schema *spec.Schema) error + IsRequired() (bool, error) +} + +// Debugger is the interface that wraps the basic Printf method. +type Debugger interface { + Printf(format string, v ...interface{}) } // New creates a new Parser with default properties. 
func New(options ...func(*Parser)) *Parser { + // parser.swagger.SecurityDefinitions = + parser := &Parser{ swagger: &spec.Swagger{ SwaggerProps: spec.SwaggerProps{ Info: &spec.Info{ InfoProps: spec.InfoProps{ Contact: &spec.ContactInfo{}, - License: &spec.License{}, + License: nil, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{}, }, }, Paths: &spec.Paths{ Paths: make(map[string]spec.PathItem), }, - Definitions: make(map[string]spec.Schema), + Definitions: make(map[string]spec.Schema), + SecurityDefinitions: make(map[string]*spec.SecurityScheme), }, }, - files: make(map[string]*ast.File), - TypeDefinitions: make(map[string]map[string]*ast.TypeSpec), - ImportAliases: make(map[string]map[string]*ast.ImportSpec), - CustomPrimitiveTypes: make(map[string]string), - registerTypes: make(map[string]*ast.TypeSpec), - excludes: make(map[string]bool), + packages: NewPackagesDefinitions(), + debug: log.New(os.Stdout, "", log.LstdFlags), + parsedSchemas: make(map[*TypeSpecDef]*Schema), + outputSchemas: make(map[*TypeSpecDef]*Schema), + existSchemaNames: make(map[string]*Schema), + toBeRenamedSchemas: make(map[string]string), + excludes: make(map[string]bool), + fieldParserFactory: newTagBaseFieldParser, } for _, option := range options { @@ -109,14 +178,21 @@ func New(options ...func(*Parser)) *Parser { return parser } -// SetMarkdownFileDirectory sets the directory to search for markdownfiles +// SetMarkdownFileDirectory sets the directory to search for markdown files. func SetMarkdownFileDirectory(directoryPath string) func(*Parser) { return func(p *Parser) { p.markdownFileDir = directoryPath } } -// SetExcludedDirsAndFiles sets directories and files to be excluded when searching +// SetCodeExamplesDirectory sets the directory to search for code example files. +func SetCodeExamplesDirectory(directoryPath string) func(*Parser) { + return func(p *Parser) { + p.codeExampleFilesDir = directoryPath + } +} + +// SetExcludedDirsAndFiles sets directories and files to be excluded when searching. func SetExcludedDirsAndFiles(excludes string) func(*Parser) { return func(p *Parser) { for _, f := range strings.Split(excludes, ",") { @@ -129,52 +205,94 @@ func SetExcludedDirsAndFiles(excludes string) func(*Parser) { } } -// ParseAPI parses general api info for given searchDir and mainAPIFile -func (parser *Parser) ParseAPI(searchDir string, mainAPIFile string) error { - Printf("Generate general API Info, search dir:%s", searchDir) +// SetStrict sets whether swag should error or warn when it detects cases which are most likely user errors. +func SetStrict(strict bool) func(*Parser) { + return func(p *Parser) { + p.Strict = strict + } +} + +// SetDebugger allows the use of user-defined implementations. +func SetDebugger(logger Debugger) func(parser *Parser) { + return func(p *Parser) { + p.debug = logger + } +} - if err := parser.getAllGoFileInfo(searchDir); err != nil { - return err +// SetFieldParserFactory allows the use of user-defined implementations. +func SetFieldParserFactory(factory FieldParserFactory) func(parser *Parser) { + return func(p *Parser) { + p.fieldParserFactory = factory } +} + +// ParseAPI parses general api info for given searchDir and mainAPIFile. +func (parser *Parser) ParseAPI(searchDir string, mainAPIFile string, parseDepth int) error { + return parser.ParseAPIMultiSearchDir([]string{searchDir}, mainAPIFile, parseDepth) +} + +// ParseAPIMultiSearchDir is like ParseAPI but for multiple search dirs. 
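New (above) wires in the default debugger, field-parser factory and schema maps, and ParseAPI now forwards a parse depth to ParseAPIMultiSearchDir (below). A rough usage sketch built only from the options shown in this diff; the search dir, main file and depth are placeholders:

```go
package main

import (
	"log"

	"github.com/swaggo/swag"
)

func main() {
	// Hypothetical invocation of the parser with the functional options added above.
	p := swag.New(
		swag.SetStrict(true),
		swag.SetExcludedDirsAndFiles("vendor,tmp"),
		swag.SetCodeExamplesDirectory("./code-examples"),
	)
	if err := p.ParseAPI("./", "main.go", 100); err != nil {
		log.Fatalf("parse failed: %v", err)
	}
	log.Println("spec parsed; paths and definitions are now populated on the parser")
}
```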
+func (parser *Parser) ParseAPIMultiSearchDir(searchDirs []string, mainAPIFile string, parseDepth int) error { + for _, searchDir := range searchDirs { + parser.debug.Printf("Generate general API Info, search dir:%s", searchDir) - var t depth.Tree - t.ResolveInternal = true + packageDir, err := getPkgName(searchDir) + if err != nil { + parser.debug.Printf("warning: failed to get package name in dir: %s, error: %s", searchDir, err.Error()) + } + + err = parser.getAllGoFileInfo(packageDir, searchDir) + if err != nil { + return err + } + } - absMainAPIFilePath, err := filepath.Abs(filepath.Join(searchDir, mainAPIFile)) + absMainAPIFilePath, err := filepath.Abs(filepath.Join(searchDirs[0], mainAPIFile)) if err != nil { return err } if parser.ParseDependency { + var t depth.Tree + t.ResolveInternal = true + t.MaxDepth = parseDepth + pkgName, err := getPkgName(filepath.Dir(absMainAPIFilePath)) if err != nil { return err } - if err := t.Resolve(pkgName); err != nil { + + err = t.Resolve(pkgName) + if err != nil { return fmt.Errorf("pkg %s cannot find all dependencies, %s", pkgName, err) } + for i := 0; i < len(t.Root.Deps); i++ { - if err := parser.getAllGoFileInfoFromDeps(&t.Root.Deps[i]); err != nil { + err := parser.getAllGoFileInfoFromDeps(&t.Root.Deps[i]) + if err != nil { return err } } } - if err := parser.ParseGeneralAPIInfo(absMainAPIFilePath); err != nil { + err = parser.ParseGeneralAPIInfo(absMainAPIFilePath) + if err != nil { return err } - for _, astFile := range parser.files { - parser.ParseType(astFile) + parser.parsedSchemas, err = parser.packages.ParseTypes() + if err != nil { + return err } - for fileName, astFile := range parser.files { - if err := parser.ParseRouterAPIInfo(fileName, astFile); err != nil { - return err - } + err = parser.packages.RangeFiles(parser.ParseRouterAPIInfo) + if err != nil { + return err } - return parser.parseDefinitions() + parser.renameRefSchemas() + + return parser.checkOperationIDUniqueness() } func getPkgName(searchDir string) (string, error) { @@ -199,236 +317,323 @@ func getPkgName(searchDir string) (string, error) { return outStr, nil } -// ParseGeneralAPIInfo parses general api info for given mainAPIFile path +func initIfEmpty(license *spec.License) *spec.License { + if license == nil { + return new(spec.License) + } + + return license +} + +// ParseGeneralAPIInfo parses general api info for given mainAPIFile path. 
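parseGeneralAPIInfo (below) reads the package-level annotations, including the new `@accept`/`@produce` handling, security definitions and `@x-` extensions, from the main file's doc comments. A hedged example of such a header; every value is illustrative:

```go
package main

// @title                       Example Service API
// @version                     1.0
// @description                 A sample server exercising the annotations parsed here.
// @host                        localhost:8080
// @BasePath                    /api/v1
// @accept                      json
// @produce                     json
// @schemes                     http https
// @query.collection.format     multi
// @securityDefinitions.apikey  ApiKeyAuth
// @in                          header
// @name                        Authorization
// @x-example-extension         {"note":"extension values must be valid JSON"}
func main() {}
```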
func (parser *Parser) ParseGeneralAPIInfo(mainAPIFile string) error { - fileSet := token.NewFileSet() - fileTree, err := goparser.ParseFile(fileSet, mainAPIFile, nil, goparser.ParseComments) + fileTree, err := goparser.ParseFile(token.NewFileSet(), mainAPIFile, nil, goparser.ParseComments) if err != nil { return fmt.Errorf("cannot parse source files %s: %s", mainAPIFile, err) } parser.swagger.Swagger = "2.0" - securityMap := map[string]*spec.SecurityScheme{} for _, comment := range fileTree.Comments { - if !isGeneralAPIComment(comment) { + comments := strings.Split(comment.Text(), "\n") + if !isGeneralAPIComment(comments) { continue } - comments := strings.Split(comment.Text(), "\n") - previousAttribute := "" - // parsing classic meta data model - for i, commentLine := range comments { - attribute := strings.ToLower(strings.Split(commentLine, " ")[0]) - value := strings.TrimSpace(commentLine[len(attribute):]) - multilineBlock := false - if previousAttribute == attribute { - multilineBlock = true + err := parseGeneralAPIInfo(parser, comments) + if err != nil { + return err + } + } + + return nil +} + +func parseGeneralAPIInfo(parser *Parser, comments []string) error { + previousAttribute := "" + + // parsing classic meta data model + for i, commentLine := range comments { + attribute := strings.Split(commentLine, " ")[0] + value := strings.TrimSpace(commentLine[len(attribute):]) + multilineBlock := false + if previousAttribute == attribute { + multilineBlock = true + } + switch strings.ToLower(attribute) { + case "@version": + parser.swagger.Info.Version = value + case "@title": + parser.swagger.Info.Title = value + case "@description": + if multilineBlock { + parser.swagger.Info.Description += "\n" + value + + continue } - switch attribute { - case "@version": - parser.swagger.Info.Version = value - case "@title": - parser.swagger.Info.Title = value - case "@description": - if multilineBlock { - parser.swagger.Info.Description += "\n" + value - continue - } - parser.swagger.Info.Description = value - case "@description.markdown": - commentInfo, err := getMarkdownForTag("api", parser.markdownFileDir) - if err != nil { - return err - } - parser.swagger.Info.Description = string(commentInfo) - case "@termsofservice": - parser.swagger.Info.TermsOfService = value - case "@contact.name": - parser.swagger.Info.Contact.Name = value - case "@contact.email": - parser.swagger.Info.Contact.Email = value - case "@contact.url": - parser.swagger.Info.Contact.URL = value - case "@license.name": - parser.swagger.Info.License.Name = value - case "@license.url": - parser.swagger.Info.License.URL = value - case "@host": - parser.swagger.Host = value - case "@basepath": - parser.swagger.BasePath = value - case "@schemes": - parser.swagger.Schemes = getSchemes(commentLine) - case "@tag.name": - parser.swagger.Tags = append(parser.swagger.Tags, spec.Tag{ - TagProps: spec.TagProps{ - Name: value, - }, - }) - case "@tag.description": - tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] - tag.TagProps.Description = value - replaceLastTag(parser.swagger.Tags, tag) - case "@tag.description.markdown": - tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] - commentInfo, err := getMarkdownForTag(tag.TagProps.Name, parser.markdownFileDir) - if err != nil { - return err - } - tag.TagProps.Description = string(commentInfo) - replaceLastTag(parser.swagger.Tags, tag) - case "@tag.docs.url": - tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] - tag.TagProps.ExternalDocs = &spec.ExternalDocumentation{ - URL: value, 
- } - replaceLastTag(parser.swagger.Tags, tag) - case "@tag.docs.description": - tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] - if tag.TagProps.ExternalDocs == nil { - return fmt.Errorf("%s needs to come after a @tags.docs.url", attribute) - } - tag.TagProps.ExternalDocs.Description = value - replaceLastTag(parser.swagger.Tags, tag) - case "@securitydefinitions.basic": - securityMap[value] = spec.BasicAuth() - case "@securitydefinitions.apikey": - attrMap, _, err := extractSecurityAttribute(attribute, []string{"@in", "@name"}, comments[i+1:]) - if err != nil { - return err - } - securityMap[value] = spec.APIKeyAuth(attrMap["@name"], attrMap["@in"]) - case "@securitydefinitions.oauth2.application": - attrMap, scopes, err := extractSecurityAttribute(attribute, []string{"@tokenurl"}, comments[i+1:]) - if err != nil { - return err + parser.swagger.Info.Description = value + case "@description.markdown": + commentInfo, err := getMarkdownForTag("api", parser.markdownFileDir) + if err != nil { + return err + } + parser.swagger.Info.Description = string(commentInfo) + case "@termsofservice": + parser.swagger.Info.TermsOfService = value + case "@contact.name": + parser.swagger.Info.Contact.Name = value + case "@contact.email": + parser.swagger.Info.Contact.Email = value + case "@contact.url": + parser.swagger.Info.Contact.URL = value + case "@license.name": + parser.swagger.Info.License = initIfEmpty(parser.swagger.Info.License) + parser.swagger.Info.License.Name = value + case "@license.url": + parser.swagger.Info.License = initIfEmpty(parser.swagger.Info.License) + parser.swagger.Info.License.URL = value + case "@host": + parser.swagger.Host = value + case "@basepath": + parser.swagger.BasePath = value + case acceptAttr: + err := parser.ParseAcceptComment(value) + if err != nil { + return err + } + case produceAttr: + err := parser.ParseProduceComment(value) + if err != nil { + return err + } + case "@schemes": + parser.swagger.Schemes = getSchemes(commentLine) + case "@tag.name": + parser.swagger.Tags = append(parser.swagger.Tags, spec.Tag{ + TagProps: spec.TagProps{ + Name: value, + }, + }) + case "@tag.description": + tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] + tag.TagProps.Description = value + replaceLastTag(parser.swagger.Tags, tag) + case "@tag.description.markdown": + tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] + commentInfo, err := getMarkdownForTag(tag.TagProps.Name, parser.markdownFileDir) + if err != nil { + return err + } + tag.TagProps.Description = string(commentInfo) + replaceLastTag(parser.swagger.Tags, tag) + case "@tag.docs.url": + tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] + tag.TagProps.ExternalDocs = &spec.ExternalDocumentation{ + URL: value, + } + replaceLastTag(parser.swagger.Tags, tag) + case "@tag.docs.description": + tag := parser.swagger.Tags[len(parser.swagger.Tags)-1] + if tag.TagProps.ExternalDocs == nil { + return fmt.Errorf("%s needs to come after a @tags.docs.url", attribute) + } + tag.TagProps.ExternalDocs.Description = value + replaceLastTag(parser.swagger.Tags, tag) + case "@securitydefinitions.basic": + parser.swagger.SecurityDefinitions[value] = spec.BasicAuth() + case "@securitydefinitions.apikey": + attrMap, _, _, err := parseSecAttr(attribute, []string{"@in", "@name"}, comments[i+1:]) + if err != nil { + return err + } + parser.swagger.SecurityDefinitions[value] = spec.APIKeyAuth(attrMap["@name"], attrMap["@in"]) + case "@securitydefinitions.oauth2.application": + attrMap, scopes, extensions, err := 
parseSecAttr(attribute, []string{"@tokenurl"}, comments[i+1:]) + if err != nil { + return err + } + parser.swagger.SecurityDefinitions[value] = secOAuth2Application(attrMap["@tokenurl"], scopes, extensions) + case "@securitydefinitions.oauth2.implicit": + attrs, scopes, ext, err := parseSecAttr(attribute, []string{"@authorizationurl"}, comments[i+1:]) + if err != nil { + return err + } + parser.swagger.SecurityDefinitions[value] = secOAuth2Implicit(attrs["@authorizationurl"], scopes, ext) + case "@securitydefinitions.oauth2.password": + attrs, scopes, ext, err := parseSecAttr(attribute, []string{"@tokenurl"}, comments[i+1:]) + if err != nil { + return err + } + parser.swagger.SecurityDefinitions[value] = secOAuth2Password(attrs["@tokenurl"], scopes, ext) + case "@securitydefinitions.oauth2.accesscode": + attrs, scopes, ext, err := parseSecAttr(attribute, []string{"@tokenurl", "@authorizationurl"}, comments[i+1:]) + if err != nil { + return err + } + parser.swagger.SecurityDefinitions[value] = secOAuth2AccessToken(attrs["@authorizationurl"], attrs["@tokenurl"], scopes, ext) + case "@query.collection.format": + parser.collectionFormatInQuery = value + default: + prefixExtension := "@x-" + // Prefix extension + 1 char + 1 space + 1 char + if len(attribute) > 5 && attribute[:len(prefixExtension)] == prefixExtension { + extExistsInSecurityDef := false + // for each security definition + for _, v := range parser.swagger.SecurityDefinitions { + // check if extension exists + _, extExistsInSecurityDef = v.VendorExtensible.Extensions.GetString(attribute[1:]) + // if it exists in at least one, then we stop iterating + if extExistsInSecurityDef { + break + } } - securityMap[value] = securitySchemeOAuth2Application(attrMap["@tokenurl"], scopes) - case "@securitydefinitions.oauth2.implicit": - attrMap, scopes, err := extractSecurityAttribute(attribute, []string{"@authorizationurl"}, comments[i+1:]) - if err != nil { - return err + // if it is present on security def, don't add it again + if extExistsInSecurityDef { + break } - securityMap[value] = securitySchemeOAuth2Implicit(attrMap["@authorizationurl"], scopes) - case "@securitydefinitions.oauth2.password": - attrMap, scopes, err := extractSecurityAttribute(attribute, []string{"@tokenurl"}, comments[i+1:]) - if err != nil { - return err + + var valueJSON interface{} + split := strings.SplitAfter(commentLine, attribute+" ") + if len(split) < 2 { + return fmt.Errorf("annotation %s need a value", attribute) } - securityMap[value] = securitySchemeOAuth2Password(attrMap["@tokenurl"], scopes) - case "@securitydefinitions.oauth2.accesscode": - attrMap, scopes, err := extractSecurityAttribute(attribute, []string{"@tokenurl", "@authorizationurl"}, comments[i+1:]) + extensionName := "x-" + strings.SplitAfter(attribute, prefixExtension)[1] + err := json.Unmarshal([]byte(split[1]), &valueJSON) if err != nil { - return err + return fmt.Errorf("annotation %s need a valid json value", attribute) } - securityMap[value] = securitySchemeOAuth2AccessToken(attrMap["@authorizationurl"], attrMap["@tokenurl"], scopes) - case "@query.collection.format": - parser.collectionFormatInQuery = value - default: - prefixExtension := "@x-" - if len(attribute) > 5 { // Prefix extension + 1 char + 1 space + 1 char - if attribute[:len(prefixExtension)] == prefixExtension { - var valueJSON interface{} - split := strings.SplitAfter(commentLine, attribute+" ") - if len(split) < 2 { - return fmt.Errorf("annotation %s need a value", attribute) - } - extensionName := "x-" + 
strings.SplitAfter(attribute, prefixExtension)[1] - if err := json.Unmarshal([]byte(split[1]), &valueJSON); err != nil { - return fmt.Errorf("annotation %s need a valid json value", attribute) - } - parser.swagger.AddExtension(extensionName, valueJSON) + + if strings.Contains(extensionName, "logo") { + parser.swagger.Info.Extensions.Add(extensionName, valueJSON) + } else { + if parser.swagger.Extensions == nil { + parser.swagger.Extensions = make(map[string]interface{}) } + parser.swagger.Extensions[attribute[1:]] = valueJSON } } - previousAttribute = attribute } - } - - if len(securityMap) > 0 { - parser.swagger.SecurityDefinitions = securityMap + previousAttribute = attribute } return nil } -func isGeneralAPIComment(comment *ast.CommentGroup) bool { - for _, commentLine := range strings.Split(comment.Text(), "\n") { +// ParseAcceptComment parses comment for given `accept` comment string. +func (parser *Parser) ParseAcceptComment(commentLine string) error { + return parseMimeTypeList(commentLine, &parser.swagger.Consumes, "%v accept type can't be accepted") +} + +// ParseProduceComment parses comment for given `produce` comment string. +func (parser *Parser) ParseProduceComment(commentLine string) error { + return parseMimeTypeList(commentLine, &parser.swagger.Produces, "%v produce type can't be accepted") +} + +func isGeneralAPIComment(comments []string) bool { + for _, commentLine := range comments { attribute := strings.ToLower(strings.Split(commentLine, " ")[0]) switch attribute { - // The @summary, @router, @success,@failure annotation belongs to Operation - case "@summary", "@router", "@success", "@failure": + // The @summary, @router, @success, @failure annotation belongs to Operation + case "@summary", "@router", "@success", "@failure", "@response": return false } } + return true } -func extractSecurityAttribute(context string, search []string, lines []string) (map[string]string, map[string]string, error) { +func parseSecAttr(context string, search []string, lines []string) (map[string]string, map[string]string, map[string]interface{}, error) { attrMap := map[string]string{} scopes := map[string]string{} + extensions := map[string]interface{}{} for _, v := range lines { securityAttr := strings.ToLower(strings.Split(v, " ")[0]) for _, findterm := range search { if securityAttr == findterm { attrMap[securityAttr] = strings.TrimSpace(v[len(securityAttr):]) + continue } } isExists, err := isExistsScope(securityAttr) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if isExists { - scopScheme, err := getScopeScheme(securityAttr) - if err != nil { - return nil, nil, err - } - scopes[scopScheme] = v[len(securityAttr):] + scopes[securityAttr[len(scopeAttrPrefix):]] = v[len(securityAttr):] + } + if strings.HasPrefix(securityAttr, "@x-") { + // Add the custom attribute without the @ + extensions[securityAttr[1:]] = strings.TrimSpace(v[len(securityAttr):]) } // next securityDefinitions if strings.Index(securityAttr, "@securitydefinitions.") == 0 { break } } + if len(attrMap) != len(search) { - return nil, nil, fmt.Errorf("%s is %v required", context, search) + return nil, nil, nil, fmt.Errorf("%s is %v required", context, search) } - return attrMap, scopes, nil + + return attrMap, scopes, extensions, nil } -func securitySchemeOAuth2Application(tokenurl string, scopes map[string]string) *spec.SecurityScheme { - securityScheme := spec.OAuth2Application(tokenurl) +func secOAuth2Application(tokenURL string, scopes map[string]string, + extensions map[string]interface{}) 
*spec.SecurityScheme { + securityScheme := spec.OAuth2Application(tokenURL) + securityScheme.VendorExtensible.Extensions = handleSecuritySchemaExtensions(extensions) for scope, description := range scopes { securityScheme.AddScope(scope, description) } + return securityScheme } -func securitySchemeOAuth2Implicit(authorizationurl string, scopes map[string]string) *spec.SecurityScheme { - securityScheme := spec.OAuth2Implicit(authorizationurl) +func secOAuth2Implicit(authorizationURL string, scopes map[string]string, + extensions map[string]interface{}) *spec.SecurityScheme { + securityScheme := spec.OAuth2Implicit(authorizationURL) + securityScheme.VendorExtensible.Extensions = handleSecuritySchemaExtensions(extensions) for scope, description := range scopes { securityScheme.AddScope(scope, description) } + return securityScheme } -func securitySchemeOAuth2Password(tokenurl string, scopes map[string]string) *spec.SecurityScheme { - securityScheme := spec.OAuth2Password(tokenurl) +func secOAuth2Password(tokenURL string, scopes map[string]string, + extensions map[string]interface{}) *spec.SecurityScheme { + securityScheme := spec.OAuth2Password(tokenURL) + securityScheme.VendorExtensible.Extensions = handleSecuritySchemaExtensions(extensions) for scope, description := range scopes { securityScheme.AddScope(scope, description) } + return securityScheme } -func securitySchemeOAuth2AccessToken(authorizationurl, tokenurl string, scopes map[string]string) *spec.SecurityScheme { - securityScheme := spec.OAuth2AccessToken(authorizationurl, tokenurl) +func secOAuth2AccessToken(authorizationURL, tokenURL string, + scopes map[string]string, extensions map[string]interface{}) *spec.SecurityScheme { + securityScheme := spec.OAuth2AccessToken(authorizationURL, tokenURL) + securityScheme.VendorExtensible.Extensions = handleSecuritySchemaExtensions(extensions) for scope, description := range scopes { securityScheme.AddScope(scope, description) } + return securityScheme } +func handleSecuritySchemaExtensions(providedExtensions map[string]interface{}) spec.Extensions { + var extensions spec.Extensions + if len(providedExtensions) > 0 { + extensions = make(map[string]interface{}, len(providedExtensions)) + for key, value := range providedExtensions { + extensions[key] = value + } + } + + return extensions +} + func getMarkdownForTag(tagName string, dirPath string) ([]byte, error) { filesInfos, err := ioutil.ReadDir(dirPath) if err != nil { @@ -451,75 +656,71 @@ func getMarkdownForTag(tagName string, dirPath string) ([]byte, error) { if err != nil { return nil, fmt.Errorf("Failed to read markdown file %s error: %s ", fullPath, err) } + return commentInfo, nil } } - return nil, fmt.Errorf("Unable to find markdown file for tag %s in the given directory", tagName) -} -func getScopeScheme(scope string) (string, error) { - scopeValue := scope[strings.Index(scope, "@scope."):] - if scopeValue == "" { - return "", fmt.Errorf("@scope is empty") - } - return scope[len("@scope."):], nil + return nil, fmt.Errorf("Unable to find markdown file for tag %s in the given directory", tagName) } func isExistsScope(scope string) (bool, error) { s := strings.Fields(scope) for _, v := range s { - if strings.Contains(v, "@scope.") { + if strings.Contains(v, scopeAttrPrefix) { if strings.Contains(v, ",") { return false, fmt.Errorf("@scope can't use comma(,) get=" + v) } } } - return strings.Contains(scope, "@scope."), nil + + return strings.Contains(scope, scopeAttrPrefix), nil } -// getSchemes parses swagger schemes for given 
commentLine +// getSchemes parses swagger schemes for given commentLine. func getSchemes(commentLine string) []string { attribute := strings.ToLower(strings.Split(commentLine, " ")[0]) + return strings.Split(strings.TrimSpace(commentLine[len(attribute):]), " ") } -// ParseRouterAPIInfo parses router api info for given astFile +// ParseRouterAPIInfo parses router api info for given astFile. func (parser *Parser) ParseRouterAPIInfo(fileName string, astFile *ast.File) error { for _, astDescription := range astFile.Decls { - switch astDeclaration := astDescription.(type) { - case *ast.FuncDecl: - if astDeclaration.Doc != nil && astDeclaration.Doc.List != nil { - operation := NewOperation() //for per 'function' comment, create a new 'Operation' object - operation.parser = parser - for _, comment := range astDeclaration.Doc.List { - if err := operation.ParseComment(comment.Text, astFile); err != nil { - return fmt.Errorf("ParseComment error in file %s :%+v", fileName, err) - } + astDeclaration, ok := astDescription.(*ast.FuncDecl) + if ok && astDeclaration.Doc != nil && astDeclaration.Doc.List != nil { + // for per 'function' comment, create a new 'Operation' object + operation := NewOperation(parser, SetCodeExampleFilesDirectory(parser.codeExampleFilesDir)) + for _, comment := range astDeclaration.Doc.List { + err := operation.ParseComment(comment.Text, astFile) + if err != nil { + return fmt.Errorf("ParseComment error in file %s :%+v", fileName, err) } + } + + for _, routeProperties := range operation.RouterProperties { var pathItem spec.PathItem var ok bool - if pathItem, ok = parser.swagger.Paths.Paths[operation.Path]; !ok { + pathItem, ok = parser.swagger.Paths.Paths[routeProperties.Path] + if !ok { pathItem = spec.PathItem{} } - switch strings.ToUpper(operation.HTTPMethod) { - case http.MethodGet: - pathItem.Get = &operation.Operation - case http.MethodPost: - pathItem.Post = &operation.Operation - case http.MethodDelete: - pathItem.Delete = &operation.Operation - case http.MethodPut: - pathItem.Put = &operation.Operation - case http.MethodPatch: - pathItem.Patch = &operation.Operation - case http.MethodHead: - pathItem.Head = &operation.Operation - case http.MethodOptions: - pathItem.Options = &operation.Operation + + op := refRouteMethodOp(&pathItem, routeProperties.HTTPMethod) + + // check if we already have a operation for this path and method + if *op != nil { + err := fmt.Errorf("route %s %s is declared multiple times", routeProperties.HTTPMethod, routeProperties.Path) + if parser.Strict { + return err + } + parser.debug.Printf("warning: %s\n", err) } - parser.swagger.Paths.Paths[operation.Path] = pathItem + *op = &operation.Operation + + parser.swagger.Paths.Paths[routeProperties.Path] = pathItem } } } @@ -527,958 +728,472 @@ func (parser *Parser) ParseRouterAPIInfo(fileName string, astFile *ast.File) err return nil } -// ParseType parses type info for given astFile. 
-func (parser *Parser) ParseType(astFile *ast.File) { - if _, ok := parser.TypeDefinitions[astFile.Name.String()]; !ok { - parser.TypeDefinitions[astFile.Name.String()] = make(map[string]*ast.TypeSpec) - } - - for _, astDeclaration := range astFile.Decls { - if generalDeclaration, ok := astDeclaration.(*ast.GenDecl); ok && generalDeclaration.Tok == token.TYPE { - for _, astSpec := range generalDeclaration.Specs { - if typeSpec, ok := astSpec.(*ast.TypeSpec); ok { - typeName := fmt.Sprintf("%v", typeSpec.Type) - // check if its a custom primitive type - if IsGolangPrimitiveType(typeName) { - var typeSpecFullName = fmt.Sprintf("%s.%s", astFile.Name.String(), typeSpec.Name.String()) - parser.CustomPrimitiveTypes[typeSpecFullName] = TransToValidSchemeType(typeName) - } else { - parser.TypeDefinitions[astFile.Name.String()][typeSpec.Name.String()] = typeSpec - } +func refRouteMethodOp(item *spec.PathItem, method string) (op **spec.Operation) { + switch method { + case http.MethodGet: + op = &item.Get + case http.MethodPost: + op = &item.Post + case http.MethodDelete: + op = &item.Delete + case http.MethodPut: + op = &item.Put + case http.MethodPatch: + op = &item.Patch + case http.MethodHead: + op = &item.Head + case http.MethodOptions: + op = &item.Options + } + return +} - } +func convertFromSpecificToPrimitive(typeName string) (string, error) { + name := typeName + if strings.ContainsRune(name, '.') { + name = strings.Split(name, ".")[1] + } + switch strings.ToUpper(name) { + case "TIME", "OBJECTID", "UUID": + return STRING, nil + case "DECIMAL": + return NUMBER, nil + } + + return typeName, ErrFailedConvertPrimitiveType +} + +func (parser *Parser) getTypeSchema(typeName string, file *ast.File, ref bool) (*spec.Schema, error) { + if IsGolangPrimitiveType(typeName) { + return PrimitiveSchema(TransToValidSchemeType(typeName)), nil + } + + schemaType, err := convertFromSpecificToPrimitive(typeName) + if err == nil { + return PrimitiveSchema(schemaType), nil + } + + typeSpecDef := parser.packages.FindTypeSpec(typeName, file, parser.ParseDependency) + if typeSpecDef == nil { + return nil, fmt.Errorf("cannot find type definition: %s", typeName) + } + + schema, ok := parser.parsedSchemas[typeSpecDef] + if !ok { + var err error + schema, err = parser.ParseDefinition(typeSpecDef) + if err != nil { + if err == ErrRecursiveParseStruct && ref { + return parser.getRefTypeSchema(typeSpecDef, schema), nil } + + return nil, err } } - for _, importSpec := range astFile.Imports { - if importSpec.Name == nil { - continue - } + if ref && len(schema.Schema.Type) > 0 && schema.Schema.Type[0] == OBJECT { + return parser.getRefTypeSchema(typeSpecDef, schema), nil + } - alias := importSpec.Name.Name + return schema.Schema, nil +} - if _, ok := parser.ImportAliases[alias]; !ok { - parser.ImportAliases[alias] = make(map[string]*ast.ImportSpec) - } +func (parser *Parser) renameRefSchemas() { + if len(parser.toBeRenamedSchemas) == 0 { + return + } - importParts := strings.Split(strings.Trim(importSpec.Path.Value, "\""), "/") - importPkgName := importParts[len(importParts)-1] + // rename schemas in swagger.Definitions + for name, pkgPath := range parser.toBeRenamedSchemas { + if schema, ok := parser.swagger.Definitions[name]; ok { + delete(parser.swagger.Definitions, name) + name = parser.renameSchema(name, pkgPath) + parser.swagger.Definitions[name] = schema + } + } - parser.ImportAliases[alias][importPkgName] = importSpec + // rename URLs if match + for _, refURL := range parser.toBeRenamedRefURLs { + parts := 
strings.Split(refURL.Fragment, "/") + name := parts[len(parts)-1] + if pkgPath, ok := parser.toBeRenamedSchemas[name]; ok { + parts[len(parts)-1] = parser.renameSchema(name, pkgPath) + refURL.Fragment = strings.Join(parts, "/") + } } } -func (parser *Parser) isInStructStack(refTypeName string) bool { - for _, structName := range parser.structStack { - if refTypeName == structName { - return true +func (parser *Parser) renameSchema(name, pkgPath string) string { + parts := strings.Split(name, ".") + name = fullTypeName(pkgPath, parts[len(parts)-1]) + name = strings.ReplaceAll(name, "/", "_") + + return name +} + +func (parser *Parser) getRefTypeSchema(typeSpecDef *TypeSpecDef, schema *Schema) *spec.Schema { + _, ok := parser.outputSchemas[typeSpecDef] + if !ok { + existSchema, ok := parser.existSchemaNames[schema.Name] + if ok { + // store the first one to be renamed after parsing over + _, ok = parser.toBeRenamedSchemas[existSchema.Name] + if !ok { + parser.toBeRenamedSchemas[existSchema.Name] = existSchema.PkgPath + } + // rename not the first one + schema.Name = parser.renameSchema(schema.Name, schema.PkgPath) + } else { + parser.existSchemaNames[schema.Name] = schema } + parser.swagger.Definitions[schema.Name] = spec.Schema{} + + if schema.Schema != nil { + parser.swagger.Definitions[schema.Name] = *schema.Schema + } + + parser.outputSchemas[typeSpecDef] = schema } - return false + + refSchema := RefSchema(schema.Name) + // store every URL + parser.toBeRenamedRefURLs = append(parser.toBeRenamedRefURLs, refSchema.Ref.GetURL()) + + return refSchema } -// parseDefinitions parses Swagger Api definitions. -func (parser *Parser) parseDefinitions() error { - // sort the typeNames so that parsing definitions is deterministic - typeNames := make([]string, 0, len(parser.registerTypes)) - for refTypeName := range parser.registerTypes { - typeNames = append(typeNames, refTypeName) - } - sort.Strings(typeNames) - - for _, refTypeName := range typeNames { - typeSpec := parser.registerTypes[refTypeName] - ss := strings.Split(refTypeName, ".") - pkgName := ss[0] - parser.structStack = nil - if err := parser.ParseDefinition(pkgName, typeSpec.Name.Name, typeSpec); err != nil { - return err +func (parser *Parser) isInStructStack(typeSpecDef *TypeSpecDef) bool { + for _, specDef := range parser.structStack { + if typeSpecDef == specDef { + return true } } - return nil + + return false } // ParseDefinition parses given type spec that corresponds to the type under // given name and package, and populates swagger schema definitions registry // with a schema for the given type -func (parser *Parser) ParseDefinition(pkgName, typeName string, typeSpec *ast.TypeSpec) error { - refTypeName := TypeDocName(pkgName, typeSpec) +func (parser *Parser) ParseDefinition(typeSpecDef *TypeSpecDef) (*Schema, error) { + typeName := typeSpecDef.FullName() + refTypeName := TypeDocName(typeName, typeSpecDef.TypeSpec) - if typeSpec == nil { - Println("Skipping '" + refTypeName + "', pkg '" + pkgName + "' not found, try add flag --parseDependency or --parseVendor.") - return nil - } + schema, ok := parser.parsedSchemas[typeSpecDef] + if ok { + parser.debug.Printf("Skipping '%s', already parsed.", typeName) - if _, isParsed := parser.swagger.Definitions[refTypeName]; isParsed { - Println("Skipping '" + refTypeName + "', already parsed.") - return nil + return schema, nil } - if parser.isInStructStack(refTypeName) { - Println("Skipping '" + refTypeName + "', recursion detected.") - return nil + if parser.isInStructStack(typeSpecDef) 
{ + parser.debug.Printf("Skipping '%s', recursion detected.", typeName) + + return &Schema{ + Name: refTypeName, + PkgPath: typeSpecDef.PkgPath, + Schema: PrimitiveSchema(OBJECT), + }, + ErrRecursiveParseStruct } - parser.structStack = append(parser.structStack, refTypeName) + parser.structStack = append(parser.structStack, typeSpecDef) - Println("Generating " + refTypeName) + parser.debug.Printf("Generating %s", typeName) - schema, err := parser.parseTypeExpr(pkgName, typeName, typeSpec.Type) + definition, err := parser.parseTypeExpr(typeSpecDef.File, typeSpecDef.TypeSpec.Type, false) if err != nil { - return err - } - parser.swagger.Definitions[refTypeName] = *schema - return nil -} - -func (parser *Parser) collectRequiredFields(pkgName string, properties map[string]spec.Schema, extraRequired []string) (requiredFields []string) { - // created sorted list of properties keys so when we iterate over them it's deterministic - ks := make([]string, 0, len(properties)) - for k := range properties { - ks = append(ks, k) + return nil, err } - sort.Strings(ks) - - requiredFields = make([]string, 0) - // iterate over keys list instead of map to avoid the random shuffle of the order that go does for maps - for _, k := range ks { - prop := properties[k] - - // todo find the pkgName of the property type - tname := prop.SchemaProps.Type[0] - if _, ok := parser.TypeDefinitions[pkgName][tname]; ok { - tspec := parser.TypeDefinitions[pkgName][tname] - parser.ParseDefinition(pkgName, tname, tspec) - } - requiredFields = append(requiredFields, prop.SchemaProps.Required...) - properties[k] = prop + s := Schema{ + Name: refTypeName, + PkgPath: typeSpecDef.PkgPath, + Schema: definition, } + parser.parsedSchemas[typeSpecDef] = &s - if extraRequired != nil { - requiredFields = append(requiredFields, extraRequired...) + // update an empty schema as a result of recursion + s2, ok := parser.outputSchemas[typeSpecDef] + if ok { + parser.swagger.Definitions[s2.Name] = *definition } - sort.Strings(requiredFields) - - return + return &s, nil } func fullTypeName(pkgName, typeName string) string { if pkgName != "" { return pkgName + "." + typeName } + return typeName } // parseTypeExpr parses given type expression that corresponds to the type under // given name and package, and returns swagger schema for it. 
-func (parser *Parser) parseTypeExpr(pkgName, typeName string, typeExpr ast.Expr) (*spec.Schema, error) { - +func (parser *Parser) parseTypeExpr(file *ast.File, typeExpr ast.Expr, ref bool) (*spec.Schema, error) { switch expr := typeExpr.(type) { + // type Foo interface{} + case *ast.InterfaceType: + return &spec.Schema{}, nil + // type Foo struct {...} case *ast.StructType: - if typedef, ok := parser.TypeDefinitions[pkgName][typeName]; ok { - refTypeName := TypeDocName(pkgName, typedef) - if schema, isParsed := parser.swagger.Definitions[refTypeName]; isParsed { - return &schema, nil - } - } - - return parser.parseStruct(pkgName, expr.Fields) + return parser.parseStruct(file, expr.Fields) // type Foo Baz case *ast.Ident: - if IsGolangPrimitiveType(expr.Name) { - return &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: spec.StringOrArray{TransToValidSchemeType(expr.Name)}, - }, - }, nil - } - refTypeName := fullTypeName(pkgName, expr.Name) - if typedef, ok := parser.TypeDefinitions[pkgName][expr.Name]; ok { - refTypeName = TypeDocName(pkgName, typedef) - if _, isParsed := parser.swagger.Definitions[refTypeName]; !isParsed { - parser.ParseDefinition(pkgName, expr.Name, typedef) - } - } - - return &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: spec.Ref{ - Ref: jsonreference.MustCreateRef("#/definitions/" + refTypeName), - }, - }, - }, nil + return parser.getTypeSchema(expr.Name, file, ref) // type Foo *Baz case *ast.StarExpr: - return parser.parseTypeExpr(pkgName, typeName, expr.X) - - // type Foo []Baz - case *ast.ArrayType: - itemSchema, err := parser.parseTypeExpr(pkgName, "", expr.Elt) - if err != nil { - return &spec.Schema{}, err - } - return &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: itemSchema, - }, - }, - }, nil + return parser.parseTypeExpr(file, expr.X, ref) // type Foo pkg.Bar case *ast.SelectorExpr: if xIdent, ok := expr.X.(*ast.Ident); ok { - return parser.parseTypeExpr(xIdent.Name, expr.Sel.Name, expr.Sel) + return parser.getTypeSchema(fullTypeName(xIdent.Name, expr.Sel.Name), file, ref) + } + // type Foo []Baz + case *ast.ArrayType: + itemSchema, err := parser.parseTypeExpr(file, expr.Elt, true) + if err != nil { + return nil, err } + return spec.ArrayProperty(itemSchema), nil // type Foo map[string]Bar case *ast.MapType: - var valueSchema spec.SchemaOrBool if _, ok := expr.Value.(*ast.InterfaceType); ok { - valueSchema.Allows = true - } else { - schema, err := parser.parseTypeExpr(pkgName, "", expr.Value) - if err != nil { - return &spec.Schema{}, err - } - valueSchema.Schema = schema + return spec.MapProperty(nil), nil } - return &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &valueSchema, - }, - }, nil + schema, err := parser.parseTypeExpr(file, expr.Value, true) + if err != nil { + return nil, err + } + + return spec.MapProperty(schema), nil + + case *ast.FuncType: + return nil, ErrFuncTypeField // ... default: - Printf("Type definition of type '%T' is not supported yet. Using 'object' instead.\n", typeExpr) + parser.debug.Printf("Type definition of type '%T' is not supported yet. 
Using 'object' instead.\n", typeExpr) } - return &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - }, - }, nil + return PrimitiveSchema(OBJECT), nil } -func (parser *Parser) parseStruct(pkgName string, fields *ast.FieldList) (*spec.Schema, error) { - - extraRequired := make([]string, 0) +func (parser *Parser) parseStruct(file *ast.File, fields *ast.FieldList) (*spec.Schema, error) { + required := make([]string, 0) properties := make(map[string]spec.Schema) for _, field := range fields.List { - fieldProps, requiredFromAnon, err := parser.parseStructField(pkgName, field) + fieldProps, requiredFromAnon, err := parser.parseStructField(file, field) if err != nil { - return &spec.Schema{}, err + if err == ErrFuncTypeField { + continue + } + + return nil, err } - extraRequired = append(extraRequired, requiredFromAnon...) + if len(fieldProps) == 0 { + continue + } + required = append(required, requiredFromAnon...) for k, v := range fieldProps { properties[k] = v } } - // collect requireds from our properties and anonymous fields - required := parser.collectRequiredFields(pkgName, properties, extraRequired) - - // unset required from properties because we've collected them - for k, prop := range properties { - prop.SchemaProps.Required = make([]string, 0) - properties[k] = prop - } + sort.Strings(required) return &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Type: []string{OBJECT}, Properties: properties, Required: required, - }}, nil -} - -type structField struct { - name string - desc string - schemaType string - arrayType string - formatType string - isRequired bool - readOnly bool - crossPkg string - exampleValue interface{} - maximum *float64 - minimum *float64 - maxLength *int64 - minLength *int64 - enums []interface{} - defaultValue interface{} - extensions map[string]interface{} -} - -func (sf *structField) toStandardSchema() *spec.Schema { - required := make([]string, 0) - if sf.isRequired { - required = append(required, sf.name) - } - return &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{sf.schemaType}, - Description: sf.desc, - Format: sf.formatType, - Required: required, - Maximum: sf.maximum, - Minimum: sf.minimum, - MaxLength: sf.maxLength, - MinLength: sf.minLength, - Enum: sf.enums, - Default: sf.defaultValue, - }, - SwaggerSchemaProps: spec.SwaggerSchemaProps{ - Example: sf.exampleValue, - ReadOnly: sf.readOnly, - }, - VendorExtensible: spec.VendorExtensible{ - Extensions: sf.extensions, }, - } + }, nil } -func (parser *Parser) parseStructField(pkgName string, field *ast.Field) (map[string]spec.Schema, []string, error) { - properties := map[string]spec.Schema{} - +func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[string]spec.Schema, []string, error) { if field.Names == nil { - fullTypeName, err := getFieldType(field.Type) - if err != nil { - return properties, []string{}, nil + if field.Tag != nil { + skip, ok := reflect.StructTag(strings.ReplaceAll(field.Tag.Value, "`", "")).Lookup("swaggerignore") + if ok && strings.EqualFold(skip, "true") { + return nil, nil, nil + } } - typeName := fullTypeName - - if splits := strings.Split(fullTypeName, "."); len(splits) > 1 { - pkgName = splits[0] - typeName = splits[1] + typeName, err := getFieldType(field.Type) + if err != nil { + return nil, nil, err } - - typeSpec := parser.TypeDefinitions[pkgName][typeName] - if typeSpec == nil { - // Check if the pkg name is an alias and try to define type spec using real package name - if aliases, ok 
:= parser.ImportAliases[pkgName]; ok { - for alias := range aliases { - typeSpec = parser.TypeDefinitions[alias][typeName] - if typeSpec != nil { - break - } - } - } + schema, err := parser.getTypeSchema(typeName, file, false) + if err != nil { + return nil, nil, err } - if typeSpec != nil { - schema, err := parser.parseTypeExpr(pkgName, typeName, typeSpec.Type) - if err != nil { - return properties, []string{}, err - } - schemaType := "unknown" - if len(schema.SchemaProps.Type) > 0 { - schemaType = schema.SchemaProps.Type[0] + if len(schema.Type) > 0 && schema.Type[0] == OBJECT { + if len(schema.Properties) == 0 { + return nil, nil, nil } - switch schemaType { - case "object": - for k, v := range schema.SchemaProps.Properties { - properties[k] = v - } - case "array": - properties[typeName] = *schema - default: - Printf("Can't extract properties from a schema of type '%s'", schemaType) + properties := map[string]spec.Schema{} + for k, v := range schema.Properties { + properties[k] = v } + return properties, schema.SchemaProps.Required, nil } - return properties, nil, nil + // for alias type of non-struct types ,such as array,map, etc. ignore field tag. + return map[string]spec.Schema{typeName: *schema}, nil, nil } - structField, err := parser.parseField(pkgName, field) + ps := parser.fieldParserFactory(parser, field) + + ok, err := ps.ShouldSkip() if err != nil { - return properties, nil, err + return nil, nil, err } - if structField.name == "" { - return properties, nil, nil + if ok { + return nil, nil, nil } - // TODO: find package of schemaType and/or arrayType - if structField.crossPkg != "" { - pkgName = structField.crossPkg + fieldName, err := ps.FieldName() + if err != nil { + return nil, nil, err } - fillObject := func(src, dest interface{}) error { - bin, err := json.Marshal(src) - if err != nil { - return err - } - return json.Unmarshal(bin, dest) + schema, err := ps.CustomSchema() + if err != nil { + return nil, nil, err } - - //for spec.Schema have implemented json.Marshaler, here in another way to convert - fillSchema := func(src, dest *spec.Schema) error { - err = fillObject(&src.SchemaProps, &dest.SchemaProps) - if err != nil { - return err + if schema == nil { + typeName, err := getFieldType(field.Type) + if err == nil { + // named type + schema, err = parser.getTypeSchema(typeName, file, true) + } else { + // unnamed type + schema, err = parser.parseTypeExpr(file, field.Type, false) } - err = fillObject(&src.SwaggerSchemaProps, &dest.SwaggerSchemaProps) if err != nil { - return err + return nil, nil, err } - return fillObject(&src.VendorExtensible, &dest.VendorExtensible) } - if typeSpec, ok := parser.TypeDefinitions[pkgName][structField.schemaType]; ok { // user type field - // write definition if not yet present - err = parser.ParseDefinition(pkgName, structField.schemaType, typeSpec) - if err != nil { - return properties, nil, err - } - required := make([]string, 0) - if structField.isRequired { - required = append(required, structField.name) - } - properties[structField.name] = spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, // to avoid swagger validation error - Description: structField.desc, - Required: required, - Ref: spec.Ref{ - Ref: jsonreference.MustCreateRef("#/definitions/" + TypeDocName(pkgName, typeSpec)), - }, - }, - SwaggerSchemaProps: spec.SwaggerSchemaProps{ - ReadOnly: structField.readOnly, - }, - } - } else if structField.schemaType == "array" { // array field type - // if defined -- ref it - if typeSpec, ok := 
parser.TypeDefinitions[pkgName][structField.arrayType]; ok { // user type in array - parser.ParseDefinition(pkgName, structField.arrayType, - parser.TypeDefinitions[pkgName][structField.arrayType]) - required := make([]string, 0) - if structField.isRequired { - required = append(required, structField.name) - } - properties[structField.name] = spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{structField.schemaType}, - Description: structField.desc, - Required: required, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: spec.Ref{ - Ref: jsonreference.MustCreateRef("#/definitions/" + TypeDocName(pkgName, typeSpec)), - }, - }, - }, - }, - }, - SwaggerSchemaProps: spec.SwaggerSchemaProps{ - ReadOnly: structField.readOnly, - }, - } - } else if structField.arrayType == "object" { - // Anonymous struct - if astTypeArray, ok := field.Type.(*ast.ArrayType); ok { // if array - props := make(map[string]spec.Schema) - if expr, ok := astTypeArray.Elt.(*ast.StructType); ok { - for _, field := range expr.Fields.List { - var fieldProps map[string]spec.Schema - fieldProps, _, err = parser.parseStructField(pkgName, field) - if err != nil { - return properties, nil, err - } - for k, v := range fieldProps { - props[k] = v - } - } - properties[structField.name] = spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{structField.schemaType}, - Description: structField.desc, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: props, - }, - }, - }, - }, - SwaggerSchemaProps: spec.SwaggerSchemaProps{ - ReadOnly: structField.readOnly, - }, - } - } else { - schema, _ := parser.parseTypeExpr(pkgName, "", astTypeArray.Elt) - properties[structField.name] = spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{structField.schemaType}, - Description: structField.desc, - Items: &spec.SchemaOrArray{ - Schema: schema, - }, - }, - SwaggerSchemaProps: spec.SwaggerSchemaProps{ - ReadOnly: structField.readOnly, - }, - } - } - } - } else if structField.arrayType == "array" { - if astTypeArray, ok := field.Type.(*ast.ArrayType); ok { - schema, _ := parser.parseTypeExpr(pkgName, "", astTypeArray.Elt) - properties[structField.name] = spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{structField.schemaType}, - Description: structField.desc, - Items: &spec.SchemaOrArray{ - Schema: schema, - }, - }, - SwaggerSchemaProps: spec.SwaggerSchemaProps{ - ReadOnly: structField.readOnly, - }, - } - } - } else { - // standard type in array - required := make([]string, 0) - if structField.isRequired { - required = append(required, structField.name) - } - - properties[structField.name] = spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{structField.schemaType}, - Description: structField.desc, - Format: structField.formatType, - Required: required, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{structField.arrayType}, - Maximum: structField.maximum, - Minimum: structField.minimum, - MaxLength: structField.maxLength, - MinLength: structField.minLength, - Enum: structField.enums, - Default: structField.defaultValue, - }, - }, - }, - }, - SwaggerSchemaProps: spec.SwaggerSchemaProps{ - Example: structField.exampleValue, - ReadOnly: structField.readOnly, - }, - } - } - } else if astTypeMap, ok := field.Type.(*ast.MapType); ok { // if map - stdSchema := structField.toStandardSchema() - mapValueSchema, err := 
parser.parseTypeExpr(pkgName, "", astTypeMap) - if err != nil { - return properties, nil, err - } - stdSchema.Type = mapValueSchema.Type - stdSchema.AdditionalProperties = mapValueSchema.AdditionalProperties - properties[structField.name] = *stdSchema - } else { - stdSchema := structField.toStandardSchema() - properties[structField.name] = *stdSchema - - if nestStar, ok := field.Type.(*ast.StarExpr); ok { - if !IsGolangPrimitiveType(structField.schemaType) { - schema, err := parser.parseTypeExpr(pkgName, structField.schemaType, nestStar.X) - if err != nil { - return properties, nil, err - } + err = ps.ComplementSchema(schema) + if err != nil { + return nil, nil, err + } - if len(schema.SchemaProps.Type) > 0 { - err = fillSchema(schema, stdSchema) - if err != nil { - return properties, nil, err - } - properties[structField.name] = *stdSchema - return properties, nil, nil - } - } - } else if nestStruct, ok := field.Type.(*ast.StructType); ok { - props := map[string]spec.Schema{} - nestRequired := make([]string, 0) - for _, v := range nestStruct.Fields.List { - p, _, err := parser.parseStructField(pkgName, v) - if err != nil { - return properties, nil, err - } - for k, v := range p { - if v.SchemaProps.Type[0] != "object" { - nestRequired = append(nestRequired, v.SchemaProps.Required...) - v.SchemaProps.Required = make([]string, 0) - } - props[k] = v - } - } - stdSchema.Properties = props - stdSchema.Required = nestRequired - properties[structField.name] = *stdSchema - } + var tagRequired []string + required, err := ps.IsRequired() + if err != nil { + return nil, nil, err + } + if required { + tagRequired = append(tagRequired, fieldName) } - return properties, nil, nil -} -func getFieldType(field interface{}) (string, error) { + return map[string]spec.Schema{fieldName: *schema}, tagRequired, nil +} - switch ftype := field.(type) { +func getFieldType(field ast.Expr) (string, error) { + switch fieldType := field.(type) { case *ast.Ident: - return ftype.Name, nil - + return fieldType.Name, nil case *ast.SelectorExpr: - packageName, err := getFieldType(ftype.X) + packageName, err := getFieldType(fieldType.X) if err != nil { return "", err } - return fmt.Sprintf("%s.%s", packageName, ftype.Sel.Name), nil + + return fullTypeName(packageName, fieldType.Sel.Name), nil case *ast.StarExpr: - fullName, err := getFieldType(ftype.X) + fullName, err := getFieldType(fieldType.X) if err != nil { return "", err } - return fullName, nil - - } - return "", fmt.Errorf("unknown field type %#v", field) -} - -func (parser *Parser) parseField(pkgName string, field *ast.Field) (*structField, error) { - prop, err := getPropertyName(pkgName, field.Type, parser) - if err != nil { - return nil, err - } - - if len(prop.ArrayType) == 0 { - if err := CheckSchemaType(prop.SchemaType); err != nil { - return nil, err - } - } else { - if err := CheckSchemaType("array"); err != nil { - return nil, err - } - } - // Skip func fields. - if prop.SchemaType == "func" { - return &structField{name: ""}, nil - } - - // Skip non-exported fields. 
- if !ast.IsExported(field.Names[0].Name) { - return &structField{name: ""}, nil - } - - structField := &structField{ - name: field.Names[0].Name, - schemaType: prop.SchemaType, - arrayType: prop.ArrayType, - crossPkg: prop.CrossPkg, - } - - switch parser.PropNamingStrategy { - case SnakeCase: - structField.name = toSnakeCase(structField.name) - case PascalCase: - //use struct field name - case CamelCase: - structField.name = toLowerCamelCase(structField.name) + return fullName, nil default: - structField.name = toLowerCamelCase(structField.name) - } - - if field.Doc != nil { - structField.desc = strings.TrimSpace(field.Doc.Text()) - } - if structField.desc == "" && field.Comment != nil { - structField.desc = strings.TrimSpace(field.Comment.Text()) - } - - if field.Tag == nil { - return structField, nil + return "", fmt.Errorf("unknown field type %#v", field) } - // `json:"tag"` -> json:"tag" - structTag := reflect.StructTag(strings.Replace(field.Tag.Value, "`", "", -1)) - - if ignoreTag := structTag.Get("swaggerignore"); ignoreTag == "true" { - structField.name = "" - return structField, nil - } - - jsonTag := structTag.Get("json") - hasStringTag := false - // json:"tag,hoge" - if strings.Contains(jsonTag, ",") { - // json:"name,string" or json:",string" - if strings.Contains(jsonTag, ",string") { - hasStringTag = true - } - - // json:",hoge" - if strings.HasPrefix(jsonTag, ",") { - jsonTag = "" - } else { - jsonTag = strings.SplitN(jsonTag, ",", 2)[0] - } - } - if jsonTag == "-" { - structField.name = "" - return structField, nil - } else if jsonTag != "" { - structField.name = jsonTag - } - - if typeTag := structTag.Get("swaggertype"); typeTag != "" { - parts := strings.Split(typeTag, ",") - if 0 < len(parts) && len(parts) <= 2 { - newSchemaType := parts[0] - newArrayType := structField.arrayType - if len(parts) >= 2 { - if newSchemaType == "array" { - newArrayType = parts[1] - if err := CheckSchemaType(newArrayType); err != nil { - return nil, err - } - } else if newSchemaType == "primitive" { - newSchemaType = parts[1] - newArrayType = parts[1] - } - } - - if err := CheckSchemaType(newSchemaType); err != nil { - return nil, err - } +} - structField.schemaType = newSchemaType - structField.arrayType = newArrayType - } - } - if exampleTag := structTag.Get("example"); exampleTag != "" { - if hasStringTag { - // then the example must be in string format - structField.exampleValue = exampleTag - } else { - example, err := defineTypeOfExample(structField.schemaType, structField.arrayType, exampleTag) - if err != nil { - return nil, err - } - structField.exampleValue = example - } - } - if formatTag := structTag.Get("format"); formatTag != "" { - structField.formatType = formatTag - } - if bindingTag := structTag.Get("binding"); bindingTag != "" { - for _, val := range strings.Split(bindingTag, ",") { - if val == "required" { - structField.isRequired = true - break - } - } - } - if validateTag := structTag.Get("validate"); validateTag != "" { - for _, val := range strings.Split(validateTag, ",") { - if val == "required" { - structField.isRequired = true - break - } - } +// GetSchemaTypePath get path of schema type. 
+func (parser *Parser) GetSchemaTypePath(schema *spec.Schema, depth int) []string { + if schema == nil || depth == 0 { + return nil } - if extensionsTag := structTag.Get("extensions"); extensionsTag != "" { - structField.extensions = map[string]interface{}{} - for _, val := range strings.Split(extensionsTag, ",") { - parts := strings.SplitN(val, "=", 2) - if len(parts) == 2 { - structField.extensions[parts[0]] = parts[1] - } else { - structField.extensions[parts[0]] = true + name := schema.Ref.String() + if name != "" { + if pos := strings.LastIndexByte(name, '/'); pos >= 0 { + name = name[pos+1:] + if schema, ok := parser.swagger.Definitions[name]; ok { + return parser.GetSchemaTypePath(&schema, depth) } } - } - if enumsTag := structTag.Get("enums"); enumsTag != "" { - enumType := structField.schemaType - if structField.schemaType == "array" { - enumType = structField.arrayType - } - for _, e := range strings.Split(enumsTag, ",") { - value, err := defineType(enumType, e) - if err != nil { - return nil, err - } - structField.enums = append(structField.enums, value) - } - } - if defaultTag := structTag.Get("default"); defaultTag != "" { - value, err := defineType(structField.schemaType, defaultTag) - if err != nil { - return nil, err - } - structField.defaultValue = value - } - - if IsNumericType(structField.schemaType) || IsNumericType(structField.arrayType) { - maximum, err := getFloatTag(structTag, "maximum") - if err != nil { - return nil, err - } - structField.maximum = maximum - - minimum, err := getFloatTag(structTag, "minimum") - if err != nil { - return nil, err - } - structField.minimum = minimum - } - if structField.schemaType == "string" || structField.arrayType == "string" { - maxLength, err := getIntTag(structTag, "maxLength") - if err != nil { - return nil, err - } - structField.maxLength = maxLength - - minLength, err := getIntTag(structTag, "minLength") - if err != nil { - return nil, err - } - structField.minLength = minLength - } - if readOnly := structTag.Get("readonly"); readOnly != "" { - structField.readOnly = readOnly == "true" + return nil } + if len(schema.Type) > 0 { + switch schema.Type[0] { + case ARRAY: + depth-- + s := []string{schema.Type[0]} - // perform this after setting everything else (min, max, etc...) - if hasStringTag { - - // @encoding/json: "It applies only to fields of string, floating point, integer, or boolean types." - defaultValues := map[string]string{ - // Zero Values as string - "string": "", - "integer": "0", - "boolean": "false", - "number": "0", - } - - if defaultValue, ok := defaultValues[structField.schemaType]; ok { - structField.schemaType = "string" + return append(s, parser.GetSchemaTypePath(schema.Items.Schema, depth)...) + case OBJECT: + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + // for map + depth-- + s := []string{schema.Type[0]} - if structField.exampleValue == nil { - // if exampleValue is not defined by the user, - // we will force an example with a correct value - // (eg: int->"0", bool:"false") - structField.exampleValue = defaultValue + return append(s, parser.GetSchemaTypePath(schema.AdditionalProperties.Schema, depth)...) 
} } - } - - return structField, nil -} - -func replaceLastTag(slice []spec.Tag, element spec.Tag) { - slice = slice[:len(slice)-1] - slice = append(slice, element) -} -func getFloatTag(structTag reflect.StructTag, tagName string) (*float64, error) { - strValue := structTag.Get(tagName) - if strValue == "" { - return nil, nil + return []string{schema.Type[0]} } - value, err := strconv.ParseFloat(strValue, 64) - if err != nil { - return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) - } - - return &value, nil + return []string{ANY} } -func getIntTag(structTag reflect.StructTag, tagName string) (*int64, error) { - strValue := structTag.Get(tagName) - if strValue == "" { - return nil, nil - } - - value, err := strconv.ParseInt(strValue, 10, 64) - if err != nil { - return nil, fmt.Errorf("can't parse numeric value of %q tag: %v", tagName, err) - } - - return &value, nil -} - -func toSnakeCase(in string) string { - runes := []rune(in) - length := len(runes) - - var out []rune - for i := 0; i < length; i++ { - if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) { - out = append(out, '_') - } - out = append(out, unicode.ToLower(runes[i])) - } - return string(out) -} - -func toLowerCamelCase(in string) string { - runes := []rune(in) - - var out []rune - flag := false - for i, curr := range runes { - if (i == 0 && unicode.IsUpper(curr)) || (flag && unicode.IsUpper(curr)) { - out = append(out, unicode.ToLower(curr)) - flag = true - } else { - out = append(out, curr) - flag = false - } - } - - return string(out) +func replaceLastTag(slice []spec.Tag, element spec.Tag) { + slice = append(slice[:len(slice)-1], element) } // defineTypeOfExample example value define the type (object and array unsupported) func defineTypeOfExample(schemaType, arrayType, exampleValue string) (interface{}, error) { switch schemaType { - case "string": + case STRING: return exampleValue, nil - case "number": + case NUMBER: v, err := strconv.ParseFloat(exampleValue, 64) if err != nil { return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) } + return v, nil - case "integer": + case INTEGER: v, err := strconv.Atoi(exampleValue) if err != nil { return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) } + return v, nil - case "boolean": + case BOOLEAN: v, err := strconv.ParseBool(exampleValue) if err != nil { return nil, fmt.Errorf("example value %s can't convert to %s err: %s", exampleValue, schemaType, err) } + return v, nil - case "array": + case ARRAY: values := strings.Split(exampleValue, ",") result := make([]interface{}, 0) for _, value := range values { @@ -1488,15 +1203,51 @@ func defineTypeOfExample(schemaType, arrayType, exampleValue string) (interface{ } result = append(result, v) } + + return result, nil + case OBJECT: + if arrayType == "" { + return nil, fmt.Errorf("%s is unsupported type in example value `%s`", schemaType, exampleValue) + } + + values := strings.Split(exampleValue, ",") + result := map[string]interface{}{} + for _, value := range values { + mapData := strings.Split(value, ":") + + if len(mapData) == 2 { + v, err := defineTypeOfExample(arrayType, "", mapData[1]) + if err != nil { + return nil, err + } + result[mapData[0]] = v + } else { + return nil, fmt.Errorf("example value %s should format: key:value", exampleValue) + } + } + return result, nil - default: - return nil, fmt.Errorf("%s is unsupported type in example 
value", schemaType) } + + return nil, fmt.Errorf("%s is unsupported type in example value %s", schemaType, exampleValue) } // GetAllGoFileInfo gets all Go source files information for given searchDir. -func (parser *Parser) getAllGoFileInfo(searchDir string) error { - return filepath.Walk(searchDir, parser.visit) +func (parser *Parser) getAllGoFileInfo(packageDir, searchDir string) error { + return filepath.Walk(searchDir, func(path string, f os.FileInfo, _ error) error { + if err := parser.Skip(path, f); err != nil { + return err + } else if f.IsDir() { + return nil + } + + relPath, err := filepath.Rel(searchDir, path) + if err != nil { + return err + } + + return parser.parseFile(filepath.ToSlash(filepath.Dir(filepath.Clean(filepath.Join(packageDir, relPath)))), path, nil) + }) } func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg) error { @@ -1510,7 +1261,7 @@ func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg) error { return nil } srcDir := pkg.Raw.Dir - files, err := ioutil.ReadDir(srcDir) // only parsing files in the dir(don't contains sub dir files) + files, err := ioutil.ReadDir(srcDir) // only parsing files in the dir(don't contain sub dir files) if err != nil { return err } @@ -1521,7 +1272,7 @@ func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg) error { } path := filepath.Join(srcDir, f.Name()) - if err := parser.parseFile(path); err != nil { + if err := parser.parseFile(pkg.Name, path, nil); err != nil { return err } } @@ -1535,31 +1286,60 @@ func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg) error { return nil } -func (parser *Parser) visit(path string, f os.FileInfo, err error) error { - if err := parser.Skip(path, f); err != nil { +func (parser *Parser) parseFile(packageDir, path string, src interface{}) error { + if strings.HasSuffix(strings.ToLower(path), "_test.go") || filepath.Ext(path) != ".go" { + return nil + } + + // positions are relative to FileSet + astFile, err := goparser.ParseFile(token.NewFileSet(), path, src, goparser.ParseComments) + if err != nil { + return fmt.Errorf("ParseFile error:%+v", err) + } + + err = parser.packages.CollectAstFile(packageDir, path, astFile) + if err != nil { return err } - return parser.parseFile(path) + + return nil } -func (parser *Parser) parseFile(path string) error { - if ext := filepath.Ext(path); ext == ".go" { - fset := token.NewFileSet() // positions are relative to fset - astFile, err := goparser.ParseFile(fset, path, nil, goparser.ParseComments) - if err != nil { - return fmt.Errorf("ParseFile error:%+v", err) +func (parser *Parser) checkOperationIDUniqueness() error { + // operationsIds contains all operationId annotations to check it's unique + operationsIds := make(map[string]string) + + for path, item := range parser.swagger.Paths.Paths { + var method, id string + for method = range allMethod { + op := refRouteMethodOp(&item, method) + if *op != nil { + id = (**op).ID + break + } + } + if id == "" { + continue } - parser.files[path] = astFile + current := fmt.Sprintf("%s %s", method, path) + previous, ok := operationsIds[id] + if ok { + return fmt.Errorf( + "duplicated @id annotation '%s' found in '%s', previously declared in: '%s'", + id, current, previous) + } + operationsIds[id] = current } + return nil } -// Skip returns filepath.SkipDir error if match vendor and hidden folder +// Skip returns filepath.SkipDir error if match vendor and hidden folder. 
func (parser *Parser) Skip(path string, f os.FileInfo) error { if f.IsDir() { - if !parser.ParseVendor && f.Name() == "vendor" || //ignore "vendor" - f.Name() == "docs" || //exclude docs + if !parser.ParseVendor && f.Name() == "vendor" || // ignore "vendor" + f.Name() == "docs" || // exclude docs len(f.Name()) > 1 && f.Name()[0] == '.' { // exclude all hidden folder return filepath.SkipDir } @@ -1578,3 +1358,14 @@ func (parser *Parser) Skip(path string, f os.FileInfo) error { func (parser *Parser) GetSwagger() *spec.Swagger { return parser.swagger } + +// addTestType just for tests. +func (parser *Parser) addTestType(typename string) { + typeDef := &TypeSpecDef{} + parser.packages.uniqueDefinitions[typename] = typeDef + parser.parsedSchemas[typeDef] = &Schema{ + PkgPath: "", + Name: typename, + Schema: PrimitiveSchema(OBJECT), + } +} diff --git a/vendor/github.com/swaggo/swag/property.go b/vendor/github.com/swaggo/swag/property.go deleted file mode 100644 index 6d456327..00000000 --- a/vendor/github.com/swaggo/swag/property.go +++ /dev/null @@ -1,139 +0,0 @@ -package swag - -import ( - "errors" - "fmt" - "go/ast" - "strings" -) - -// ErrFailedConvertPrimitiveType Failed to convert for swag to interpretable type -var ErrFailedConvertPrimitiveType = errors.New("swag property: failed convert primitive type") - -type propertyName struct { - SchemaType string - ArrayType string - CrossPkg string -} - -type propertyNewFunc func(schemeType string, crossPkg string) propertyName - -func newArrayProperty(schemeType string, crossPkg string) propertyName { - return propertyName{ - SchemaType: "array", - ArrayType: schemeType, - CrossPkg: crossPkg, - } -} - -func newProperty(schemeType string, crossPkg string) propertyName { - return propertyName{ - SchemaType: schemeType, - ArrayType: "string", - CrossPkg: crossPkg, - } -} - -func convertFromSpecificToPrimitive(typeName string) (string, error) { - typeName = strings.ToUpper(typeName) - switch typeName { - case "TIME", "OBJECTID", "UUID": - return "string", nil - case "DECIMAL": - return "number", nil - } - return "", ErrFailedConvertPrimitiveType -} - -func parseFieldSelectorExpr(astTypeSelectorExpr *ast.SelectorExpr, parser *Parser, propertyNewFunc propertyNewFunc) propertyName { - if primitiveType, err := convertFromSpecificToPrimitive(astTypeSelectorExpr.Sel.Name); err == nil { - return propertyNewFunc(primitiveType, "") - } - - if pkgName, ok := astTypeSelectorExpr.X.(*ast.Ident); ok { - if typeDefinitions, ok := parser.TypeDefinitions[pkgName.Name][astTypeSelectorExpr.Sel.Name]; ok { - if expr, ok := typeDefinitions.Type.(*ast.SelectorExpr); ok { - if primitiveType, err := convertFromSpecificToPrimitive(expr.Sel.Name); err == nil { - return propertyNewFunc(primitiveType, "") - } - } - parser.ParseDefinition(pkgName.Name, astTypeSelectorExpr.Sel.Name, typeDefinitions) - return propertyNewFunc(astTypeSelectorExpr.Sel.Name, pkgName.Name) - } - if aliasedNames, ok := parser.ImportAliases[pkgName.Name]; ok { - for aliasedName := range aliasedNames { - if typeDefinitions, ok := parser.TypeDefinitions[aliasedName][astTypeSelectorExpr.Sel.Name]; ok { - if expr, ok := typeDefinitions.Type.(*ast.SelectorExpr); ok { - if primitiveType, err := convertFromSpecificToPrimitive(expr.Sel.Name); err == nil { - return propertyNewFunc(primitiveType, "") - } - } - parser.ParseDefinition(aliasedName, astTypeSelectorExpr.Sel.Name, typeDefinitions) - return propertyNewFunc(astTypeSelectorExpr.Sel.Name, aliasedName) - } - } - } - name := fmt.Sprintf("%s.%v", pkgName, 
astTypeSelectorExpr.Sel.Name) - if actualPrimitiveType, isCustomType := parser.CustomPrimitiveTypes[name]; isCustomType { - return propertyName{SchemaType: actualPrimitiveType, ArrayType: actualPrimitiveType} - } - } - return propertyName{SchemaType: "string", ArrayType: "string"} -} - -// getPropertyName returns the string value for the given field if it exists -// allowedValues: array, boolean, integer, null, number, object, string -func getPropertyName(pkgName string, expr ast.Expr, parser *Parser) (propertyName, error) { - switch tp := expr.(type) { - case *ast.SelectorExpr: - return parseFieldSelectorExpr(tp, parser, newProperty), nil - case *ast.StarExpr: - return getPropertyName(pkgName, tp.X, parser) - case *ast.ArrayType: - return getArrayPropertyName(pkgName, tp.Elt, parser), nil - case *ast.MapType, *ast.StructType, *ast.InterfaceType: - return propertyName{SchemaType: "object", ArrayType: "object"}, nil - case *ast.FuncType: - return propertyName{SchemaType: "func", ArrayType: ""}, nil - case *ast.Ident: - name := tp.Name - // check if it is a custom type - if actualPrimitiveType, isCustomType := parser.CustomPrimitiveTypes[fullTypeName(pkgName, name)]; isCustomType { - return propertyName{SchemaType: actualPrimitiveType, ArrayType: actualPrimitiveType}, nil - } - - name = TransToValidSchemeType(name) - return propertyName{SchemaType: name, ArrayType: name}, nil - default: - return propertyName{}, errors.New("not supported" + fmt.Sprint(expr)) - } -} - -func getArrayPropertyName(pkgName string, astTypeArrayElt ast.Expr, parser *Parser) propertyName { - switch elt := astTypeArrayElt.(type) { - case *ast.StructType, *ast.MapType, *ast.InterfaceType: - return propertyName{SchemaType: "array", ArrayType: "object"} - case *ast.ArrayType: - return propertyName{SchemaType: "array", ArrayType: "array"} - case *ast.StarExpr: - return getArrayPropertyName(pkgName, elt.X, parser) - case *ast.SelectorExpr: - return parseFieldSelectorExpr(elt, parser, newArrayProperty) - case *ast.Ident: - name := elt.Name - if actualPrimitiveType, isCustomType := parser.CustomPrimitiveTypes[fullTypeName(pkgName, name)]; isCustomType { - name = actualPrimitiveType - } else { - name = TransToValidSchemeType(elt.Name) - } - return propertyName{SchemaType: "array", ArrayType: name} - default: - name := fmt.Sprintf("%s", astTypeArrayElt) - if actualPrimitiveType, isCustomType := parser.CustomPrimitiveTypes[fullTypeName(pkgName, name)]; isCustomType { - name = actualPrimitiveType - } else { - name = TransToValidSchemeType(name) - } - return propertyName{SchemaType: "array", ArrayType: name} - } -} diff --git a/vendor/github.com/swaggo/swag/schema.go b/vendor/github.com/swaggo/swag/schema.go index 539340fa..8f25d6f7 100644 --- a/vendor/github.com/swaggo/swag/schema.go +++ b/vendor/github.com/swaggo/swag/schema.go @@ -1,65 +1,92 @@ package swag import ( + "errors" "fmt" "go/ast" "strings" + + "github.com/go-openapi/spec" +) + +const ( + // ARRAY represent a array value. + ARRAY = "array" + // OBJECT represent a object value. + OBJECT = "object" + // PRIMITIVE represent a primitive value. + PRIMITIVE = "primitive" + // BOOLEAN represent a boolean value. + BOOLEAN = "boolean" + // INTEGER represent a integer value. + INTEGER = "integer" + // NUMBER represent a number value. + NUMBER = "number" + // STRING represent a string value. + STRING = "string" + // FUNC represent a function value. + FUNC = "func" + // ANY represent a any value. + ANY = "any" + // NIL represent a empty value. 
+ NIL = "nil" ) -// CheckSchemaType checks if typeName is not a name of primitive type +// CheckSchemaType checks if typeName is not a name of primitive type. func CheckSchemaType(typeName string) error { if !IsPrimitiveType(typeName) { return fmt.Errorf("%s is not basic types", typeName) } + return nil } -// IsSimplePrimitiveType determine whether the type name is a simple primitive type +// IsSimplePrimitiveType determine whether the type name is a simple primitive type. func IsSimplePrimitiveType(typeName string) bool { switch typeName { - case "string", "number", "integer", "boolean": + case STRING, NUMBER, INTEGER, BOOLEAN: return true - default: - return false } + + return false } -// IsPrimitiveType determine whether the type name is a primitive type +// IsPrimitiveType determine whether the type name is a primitive type. func IsPrimitiveType(typeName string) bool { switch typeName { - case "string", "number", "integer", "boolean", "array", "object", "func": + case STRING, NUMBER, INTEGER, BOOLEAN, ARRAY, OBJECT, FUNC: return true - default: - return false } + + return false } -// IsNumericType determines whether the swagger type name is a numeric type +// IsNumericType determines whether the swagger type name is a numeric type. func IsNumericType(typeName string) bool { - return typeName == "integer" || typeName == "number" + return typeName == INTEGER || typeName == NUMBER } // TransToValidSchemeType indicates type will transfer golang basic type to swagger supported type. func TransToValidSchemeType(typeName string) string { switch typeName { case "uint", "int", "uint8", "int8", "uint16", "int16", "byte": - return "integer" + return INTEGER case "uint32", "int32", "rune": - return "integer" + return INTEGER case "uint64", "int64": - return "integer" + return INTEGER case "float32", "float64": - return "number" + return NUMBER case "bool": - return "boolean" + return BOOLEAN case "string": - return "string" - default: - return typeName // to support user defined types + return STRING } + + return typeName } -// IsGolangPrimitiveType determine whether the type name is a golang primitive type +// IsGolangPrimitiveType determine whether the type name is a golang primitive type. func IsGolangPrimitiveType(typeName string) bool { switch typeName { case "uint", @@ -79,30 +106,27 @@ func IsGolangPrimitiveType(typeName string) bool { "bool", "string": return true - default: - return false } + + return false } -// TransToValidCollectionFormat determine valid collection format +// TransToValidCollectionFormat determine valid collection format. func TransToValidCollectionFormat(format string) string { switch format { case "csv", "multi", "pipes", "tsv", "ssv": return format - default: - return "" } + + return "" } -// TypeDocName get alias from comment '// @name ', otherwise the original type name to display in doc +// TypeDocName get alias from comment '// @name ', otherwise the original type name to display in doc. 
 func TypeDocName(pkgName string, spec *ast.TypeSpec) string {
     if spec != nil {
         if spec.Comment != nil {
             for _, comment := range spec.Comment.List {
-                text := strings.TrimSpace(comment.Text)
-                text = strings.TrimLeft(text, "//")
-                text = strings.TrimSpace(text)
-                texts := strings.Split(text, " ")
+                texts := strings.Split(strings.TrimSpace(strings.TrimLeft(comment.Text, "/")), " ")
                 if len(texts) > 1 && strings.ToLower(texts[0]) == "@name" {
                     return texts[1]
                 }
@@ -115,3 +139,56 @@ func TypeDocName(pkgName string, spec *ast.TypeSpec) string {
 
     return pkgName
 }
+
+// RefSchema build a reference schema.
+func RefSchema(refType string) *spec.Schema {
+    return spec.RefSchema("#/definitions/" + refType)
+}
+
+// PrimitiveSchema build a primitive schema.
+func PrimitiveSchema(refType string) *spec.Schema {
+    return &spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{refType}}}
+}
+
+// BuildCustomSchema build custom schema specified by tag swaggertype.
+func BuildCustomSchema(types []string) (*spec.Schema, error) {
+    if len(types) == 0 {
+        return nil, nil
+    }
+
+    switch types[0] {
+    case PRIMITIVE:
+        if len(types) == 1 {
+            return nil, errors.New("need primitive type after primitive")
+        }
+
+        return BuildCustomSchema(types[1:])
+    case ARRAY:
+        if len(types) == 1 {
+            return nil, errors.New("need array item type after array")
+        }
+        schema, err := BuildCustomSchema(types[1:])
+        if err != nil {
+            return nil, err
+        }
+
+        return spec.ArrayProperty(schema), nil
+    case OBJECT:
+        if len(types) == 1 {
+            return PrimitiveSchema(types[0]), nil
+        }
+        schema, err := BuildCustomSchema(types[1:])
+        if err != nil {
+            return nil, err
+        }
+
+        return spec.MapProperty(schema), nil
+    default:
+        err := CheckSchemaType(types[0])
+        if err != nil {
+            return nil, err
+        }
+
+        return PrimitiveSchema(types[0]), nil
+    }
+}
diff --git a/vendor/github.com/swaggo/swag/swagger.go b/vendor/github.com/swaggo/swag/swagger.go
index 38c5ebfc..c00feb22 100644
--- a/vendor/github.com/swaggo/swag/swagger.go
+++ b/vendor/github.com/swaggo/swag/swagger.go
@@ -2,6 +2,7 @@ package swag
 
 import (
     "errors"
+    "fmt"
     "sync"
 )
 
@@ -10,10 +11,10 @@ const Name = "swagger"
 
 var (
     swaggerMu sync.RWMutex
-    swag      Swagger
+    swags     map[string]Swagger
 )
 
-// Swagger is a interface to read swagger document.
+// Swagger is an interface to read swagger document.
 type Swagger interface {
     ReadDoc() string
 }
@@ -26,16 +27,35 @@ func Register(name string, swagger Swagger) {
         panic("swagger is nil")
     }
 
-    if swag != nil {
+    if swags == nil {
+        swags = make(map[string]Swagger)
+    }
+
+    if _, ok := swags[name]; ok {
         panic("Register called twice for swag: " + name)
     }
-    swag = swagger
+    swags[name] = swagger
 }
 
-// ReadDoc reads swagger document.
-func ReadDoc() (string, error) {
-    if swag != nil {
-        return swag.ReadDoc(), nil
+// ReadDoc reads swagger document. An optional name parameter can be passed to read a specific document.
+// The default name is "swagger".
+func ReadDoc(optionalName ...string) (string, error) {
+    swaggerMu.RLock()
+    defer swaggerMu.RUnlock()
+
+    if swags == nil {
+        return "", errors.New("no swag has yet been registered")
+    }
+
+    name := Name
+    if len(optionalName) != 0 && optionalName[0] != "" {
+        name = optionalName[0]
+    }
+
+    swag, ok := swags[name]
+    if !ok {
+        return "", fmt.Errorf("no swag named \"%s\" was registered", name)
     }
-    return "", errors.New("not yet registered swag")
+
+    return swag.ReadDoc(), nil
 }
diff --git a/vendor/github.com/swaggo/swag/types.go b/vendor/github.com/swaggo/swag/types.go
new file mode 100644
index 00000000..f65a62e0
--- /dev/null
+++ b/vendor/github.com/swaggo/swag/types.go
@@ -0,0 +1,60 @@
+package swag
+
+import (
+    "go/ast"
+
+    "github.com/go-openapi/spec"
+)
+
+// Schema parsed schema.
+type Schema struct {
+    *spec.Schema //
+    PkgPath      string // package import path used to rename Name of a definition int case of conflict
+    Name         string // Name in definitions
+}
+
+// TypeSpecDef the whole information of a typeSpec.
+type TypeSpecDef struct {
+    // ast file where TypeSpec is
+    File *ast.File
+
+    // the TypeSpec of this type definition
+    TypeSpec *ast.TypeSpec
+
+    // path of package starting from under ${GOPATH}/src or from module path in go.mod
+    PkgPath string
+}
+
+// Name the name of the typeSpec.
+func (t *TypeSpecDef) Name() string {
+    return t.TypeSpec.Name.Name
+}
+
+// FullName full name of the typeSpec.
+func (t *TypeSpecDef) FullName() string {
+    return fullTypeName(t.File.Name.Name, t.TypeSpec.Name.Name)
+}
+
+// AstFileInfo information of an ast.File.
+type AstFileInfo struct {
+    // File ast.File
+    File *ast.File
+
+    // Path the path of the ast.File
+    Path string
+
+    // PackagePath package import path of the ast.File
+    PackagePath string
+}
+
+// PackageDefinitions files and definition in a package.
+type PackageDefinitions struct {
+    // files in this package, map key is file's relative path starting package path
+    Files map[string]*ast.File
+
+    // definitions in this package, map key is typeName
+    TypeDefinitions map[string]*TypeSpecDef
+
+    // package name
+    Name string
+}
diff --git a/vendor/github.com/swaggo/swag/version.go b/vendor/github.com/swaggo/swag/version.go
index d322f013..6e3ad085 100644
--- a/vendor/github.com/swaggo/swag/version.go
+++ b/vendor/github.com/swaggo/swag/version.go
@@ -1,4 +1,4 @@
 package swag
 
-// Version of swag
-const Version = "v1.6.7"
+// Version of swag.
+const Version = "v1.7.6" diff --git a/vendor/github.com/ugorji/go/codec/go.mod b/vendor/github.com/ugorji/go/codec/go.mod deleted file mode 100644 index 7fcabb47..00000000 --- a/vendor/github.com/ugorji/go/codec/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/ugorji/go/codec - -go 1.11 - -require github.com/ugorji/go v1.2.6 diff --git a/vendor/github.com/vmihailenco/bufpool/go.mod b/vendor/github.com/vmihailenco/bufpool/go.mod deleted file mode 100644 index 7f3096ae..00000000 --- a/vendor/github.com/vmihailenco/bufpool/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/vmihailenco/bufpool - -go 1.13 - -require ( - github.com/kr/pretty v0.1.0 // indirect - github.com/stretchr/testify v1.5.1 - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect -) diff --git a/vendor/github.com/vmihailenco/bufpool/go.sum b/vendor/github.com/vmihailenco/bufpool/go.sum deleted file mode 100644 index 6074473a..00000000 --- a/vendor/github.com/vmihailenco/bufpool/go.sum +++ /dev/null @@ -1,17 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/vmihailenco/go-tinylfu/go.mod b/vendor/github.com/vmihailenco/go-tinylfu/go.mod deleted file mode 100644 index b32dfecb..00000000 --- a/vendor/github.com/vmihailenco/go-tinylfu/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/vmihailenco/go-tinylfu - -go 1.15 - -require github.com/cespare/xxhash/v2 v2.1.1 diff --git a/vendor/github.com/vmihailenco/go-tinylfu/go.sum b/vendor/github.com/vmihailenco/go-tinylfu/go.sum deleted file mode 100644 index 504f6aab..00000000 --- a/vendor/github.com/vmihailenco/go-tinylfu/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= diff --git a/vendor/github.com/vmihailenco/msgpack/v5/go.mod b/vendor/github.com/vmihailenco/msgpack/v5/go.mod deleted file mode 100644 index f630a54b..00000000 --- a/vendor/github.com/vmihailenco/msgpack/v5/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module 
github.com/vmihailenco/msgpack/v5 - -go 1.11 - -require ( - github.com/stretchr/testify v1.6.1 - github.com/vmihailenco/tagparser/v2 v2.0.0 -) diff --git a/vendor/github.com/vmihailenco/msgpack/v5/go.sum b/vendor/github.com/vmihailenco/msgpack/v5/go.sum deleted file mode 100644 index a2bef4a3..00000000 --- a/vendor/github.com/vmihailenco/msgpack/v5/go.sum +++ /dev/null @@ -1,13 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= -github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/vmihailenco/tagparser/v2/go.mod b/vendor/github.com/vmihailenco/tagparser/v2/go.mod deleted file mode 100644 index b3a64bf7..00000000 --- a/vendor/github.com/vmihailenco/tagparser/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/vmihailenco/tagparser/v2 - -go 1.15 diff --git a/vendor/github.com/yuin/gopher-lua/_state.go b/vendor/github.com/yuin/gopher-lua/_state.go index 2bdf3490..960e8810 100644 --- a/vendor/github.com/yuin/gopher-lua/_state.go +++ b/vendor/github.com/yuin/gopher-lua/_state.go @@ -3,7 +3,6 @@ package lua import ( "context" "fmt" - "github.com/yuin/gopher-lua/parse" "io" "math" "os" @@ -12,6 +11,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/yuin/gopher-lua/parse" ) const MultRet = -1 @@ -935,18 +936,22 @@ func (ls *LState) initCallFrame(cf *callFrame) { // +inline-start proto := cf.Fn.Proto nargs := cf.NArgs np := int(proto.NumParameters) - newSize := cf.LocalBase + np - // +inline-call ls.reg.checkSize newSize - for i := nargs; i < np; i++ { - ls.reg.array[cf.LocalBase+i] = LNil + if nargs < np { + // default any missing arguments to nil + newSize := cf.LocalBase + np + // +inline-call ls.reg.checkSize newSize + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + } nargs = np + ls.reg.top = newSize } if (proto.IsVarArg & VarArgIsVarArg) == 0 { if nargs < int(proto.NumUsedRegisters) { nargs = int(proto.NumUsedRegisters) } - newSize = cf.LocalBase + nargs + newSize := cf.LocalBase + nargs // +inline-call ls.reg.checkSize newSize for i := np; i < nargs; i++ { ls.reg.array[cf.LocalBase+i] = LNil @@ -1212,6 +1217,10 @@ func NewState(opts ...Options) *LState { return ls } +func (ls *LState) IsClosed() bool { + return ls.stack == nil +} + func (ls *LState) Close() { atomic.AddInt32(&ls.stop, 1) for _, file := range ls.G.tempFiles { @@ -2016,7 +2025,7 @@ func (ls *LState) SetMx(mx int) { go func() { limit := uint64(mx * 1024 * 
1024) //MB var s runtime.MemStats - for ls.stop == 0 { + for atomic.LoadInt32(&ls.stop) == 0 { runtime.ReadMemStats(&s) if s.Alloc >= limit { fmt.Println("out of memory") diff --git a/vendor/github.com/yuin/gopher-lua/go.mod b/vendor/github.com/yuin/gopher-lua/go.mod deleted file mode 100644 index e5a8841f..00000000 --- a/vendor/github.com/yuin/gopher-lua/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/yuin/gopher-lua - -require ( - github.com/chzyer/logex v1.1.10 // indirect - github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e - github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect - golang.org/x/sys v0.0.0-20190204203706-41f3e6584952 // indirect -) diff --git a/vendor/github.com/yuin/gopher-lua/go.sum b/vendor/github.com/yuin/gopher-lua/go.sum deleted file mode 100644 index ca60bd9c..00000000 --- a/vendor/github.com/yuin/gopher-lua/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -golang.org/x/sys v0.0.0-20190204203706-41f3e6584952 h1:FDfvYgoVsA7TTZSbgiqjAbfPbK47CNHdWl3h/PJtii0= -golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/yuin/gopher-lua/oslib.go b/vendor/github.com/yuin/gopher-lua/oslib.go index e1865689..c70a99bf 100644 --- a/vendor/github.com/yuin/gopher-lua/oslib.go +++ b/vendor/github.com/yuin/gopher-lua/oslib.go @@ -15,9 +15,24 @@ func init() { func getIntField(L *LState, tb *LTable, key string, v int) int { ret := tb.RawGetString(key) - if ln, ok := ret.(LNumber); ok { - return int(ln) + + switch lv := ret.(type) { + case LNumber: + return int(lv) + case LString: + slv := string(lv) + slv = strings.TrimLeft(slv, " ") + if strings.HasPrefix(slv, "0") && !strings.HasPrefix(slv, "0x") && !strings.HasPrefix(slv, "0X") { + //Standard lua interpreter only support decimal and hexadecimal + slv = strings.TrimLeft(slv, "0") + } + if num, err := parseNumber(slv); err == nil { + return int(num) + } + default: + return v } + return v } diff --git a/vendor/github.com/yuin/gopher-lua/state.go b/vendor/github.com/yuin/gopher-lua/state.go index 0edd7436..a1ee672e 100644 --- a/vendor/github.com/yuin/gopher-lua/state.go +++ b/vendor/github.com/yuin/gopher-lua/state.go @@ -7,7 +7,6 @@ package lua import ( "context" "fmt" - "github.com/yuin/gopher-lua/parse" "io" "math" "os" @@ -16,6 +15,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/yuin/gopher-lua/parse" ) const MultRet = -1 @@ -981,26 +982,30 @@ func (ls *LState) initCallFrame(cf *callFrame) { // +inline-start proto := cf.Fn.Proto nargs := cf.NArgs np := int(proto.NumParameters) - newSize := cf.LocalBase + np - // this section is inlined by go-inline - // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' - { - rg := ls.reg - requiredSize := newSize - if requiredSize > cap(rg.array) { - rg.resize(requiredSize) + if nargs < np { + // default any missing arguments to nil + newSize 
:= cf.LocalBase + np + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + rg := ls.reg + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil } - } - for i := nargs; i < np; i++ { - ls.reg.array[cf.LocalBase+i] = LNil nargs = np + ls.reg.top = newSize } if (proto.IsVarArg & VarArgIsVarArg) == 0 { if nargs < int(proto.NumUsedRegisters) { nargs = int(proto.NumUsedRegisters) } - newSize = cf.LocalBase + nargs + newSize := cf.LocalBase + nargs // this section is inlined by go-inline // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' { @@ -1089,26 +1094,30 @@ func (ls *LState) pushCallFrame(cf callFrame, fn LValue, meta bool) { // +inline proto := cf.Fn.Proto nargs := cf.NArgs np := int(proto.NumParameters) - newSize := cf.LocalBase + np - // this section is inlined by go-inline - // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' - { - rg := ls.reg - requiredSize := newSize - if requiredSize > cap(rg.array) { - rg.resize(requiredSize) + if nargs < np { + // default any missing arguments to nil + newSize := cf.LocalBase + np + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + rg := ls.reg + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil } - } - for i := nargs; i < np; i++ { - ls.reg.array[cf.LocalBase+i] = LNil nargs = np + ls.reg.top = newSize } if (proto.IsVarArg & VarArgIsVarArg) == 0 { if nargs < int(proto.NumUsedRegisters) { nargs = int(proto.NumUsedRegisters) } - newSize = cf.LocalBase + nargs + newSize := cf.LocalBase + nargs // this section is inlined by go-inline // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' { @@ -1367,6 +1376,10 @@ func NewState(opts ...Options) *LState { return ls } +func (ls *LState) IsClosed() bool { + return ls.stack == nil +} + func (ls *LState) Close() { atomic.AddInt32(&ls.stop, 1) for _, file := range ls.G.tempFiles { @@ -2171,7 +2184,7 @@ func (ls *LState) SetMx(mx int) { go func() { limit := uint64(mx * 1024 * 1024) //MB var s runtime.MemStats - for ls.stop == 0 { + for atomic.LoadInt32(&ls.stop) == 0 { runtime.ReadMemStats(&s) if s.Alloc >= limit { fmt.Println("out of memory") diff --git a/vendor/github.com/yuin/gopher-lua/utils.go b/vendor/github.com/yuin/gopher-lua/utils.go index 1467931b..2df68dc7 100644 --- a/vendor/github.com/yuin/gopher-lua/utils.go +++ b/vendor/github.com/yuin/gopher-lua/utils.go @@ -32,7 +32,7 @@ func defaultFormat(v interface{}, f fmt.State, c rune) { buf = append(buf, "%") for i := 0; i < 128; i++ { if f.Flag(i) { - buf = append(buf, string(i)) + buf = append(buf, string(rune(i))) } } diff --git a/vendor/github.com/yuin/gopher-lua/vm.go b/vendor/github.com/yuin/gopher-lua/vm.go index c3c17bdb..f3733f13 100644 --- a/vendor/github.com/yuin/gopher-lua/vm.go +++ b/vendor/github.com/yuin/gopher-lua/vm.go @@ -728,26 +728,30 @@ func init() { proto := cf.Fn.Proto nargs := cf.NArgs np := int(proto.NumParameters) - newSize := cf.LocalBase + np - // this section is inlined by go-inline - // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' - { - rg := ls.reg - requiredSize := newSize - if requiredSize 
> cap(rg.array) { - rg.resize(requiredSize) + if nargs < np { + // default any missing arguments to nil + newSize := cf.LocalBase + np + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + rg := ls.reg + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil } - } - for i := nargs; i < np; i++ { - ls.reg.array[cf.LocalBase+i] = LNil nargs = np + ls.reg.top = newSize } if (proto.IsVarArg & VarArgIsVarArg) == 0 { if nargs < int(proto.NumUsedRegisters) { nargs = int(proto.NumUsedRegisters) } - newSize = cf.LocalBase + nargs + newSize := cf.LocalBase + nargs // this section is inlined by go-inline // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' { @@ -906,26 +910,30 @@ func init() { proto := cf.Fn.Proto nargs := cf.NArgs np := int(proto.NumParameters) - newSize := cf.LocalBase + np - // this section is inlined by go-inline - // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' - { - rg := ls.reg - requiredSize := newSize - if requiredSize > cap(rg.array) { - rg.resize(requiredSize) + if nargs < np { + // default any missing arguments to nil + newSize := cf.LocalBase + np + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + rg := ls.reg + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil } - } - for i := nargs; i < np; i++ { - ls.reg.array[cf.LocalBase+i] = LNil nargs = np + ls.reg.top = newSize } if (proto.IsVarArg & VarArgIsVarArg) == 0 { if nargs < int(proto.NumUsedRegisters) { nargs = int(proto.NumUsedRegisters) } - newSize = cf.LocalBase + nargs + newSize := cf.LocalBase + nargs // this section is inlined by go-inline // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' { diff --git a/vendor/go.opentelemetry.io/otel/metric/go.mod b/vendor/go.opentelemetry.io/otel/metric/go.mod deleted file mode 100644 index 47bc47ba..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/go.mod +++ /dev/null @@ -1,54 +0,0 @@ -module go.opentelemetry.io/otel/metric - -go 1.14 - -replace go.opentelemetry.io/otel => ../ - -replace go.opentelemetry.io/otel/bridge/opencensus => ../bridge/opencensus - -replace go.opentelemetry.io/otel/bridge/opentracing => ../bridge/opentracing - -replace go.opentelemetry.io/otel/example/jaeger => ../example/jaeger - -replace go.opentelemetry.io/otel/example/namedtracer => ../example/namedtracer - -replace go.opentelemetry.io/otel/example/opencensus => ../example/opencensus - -replace go.opentelemetry.io/otel/example/otel-collector => ../example/otel-collector - -replace go.opentelemetry.io/otel/example/prom-collector => ../example/prom-collector - -replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus - -replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin - -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus - -replace go.opentelemetry.io/otel/exporters/otlp => ../exporters/otlp - -replace go.opentelemetry.io/otel/exporters/stdout => ../exporters/stdout - -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger - -replace go.opentelemetry.io/otel/exporters/trace/zipkin => 
../exporters/trace/zipkin - -replace go.opentelemetry.io/otel/internal/tools => ../internal/tools - -replace go.opentelemetry.io/otel/metric => ./ - -replace go.opentelemetry.io/otel/oteltest => ../oteltest - -replace go.opentelemetry.io/otel/sdk => ../sdk - -replace go.opentelemetry.io/otel/sdk/export/metric => ../sdk/export/metric - -replace go.opentelemetry.io/otel/sdk/metric => ../sdk/metric - -replace go.opentelemetry.io/otel/trace => ../trace - -require ( - github.com/google/go-cmp v0.5.5 - github.com/stretchr/testify v1.7.0 - go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/oteltest v0.20.0 -) diff --git a/vendor/go.opentelemetry.io/otel/metric/go.sum b/vendor/go.opentelemetry.io/otel/metric/go.sum deleted file mode 100644 index b69f2e56..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/go.sum +++ /dev/null @@ -1,15 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/go.opentelemetry.io/otel/trace/go.mod b/vendor/go.opentelemetry.io/otel/trace/go.mod deleted file mode 100644 index 914e4f43..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/go.mod +++ /dev/null @@ -1,53 +0,0 @@ -module go.opentelemetry.io/otel/trace - -go 1.14 - -replace go.opentelemetry.io/otel => ../ - -replace go.opentelemetry.io/otel/bridge/opencensus => ../bridge/opencensus - -replace go.opentelemetry.io/otel/bridge/opentracing => ../bridge/opentracing - -replace go.opentelemetry.io/otel/example/jaeger => ../example/jaeger - -replace go.opentelemetry.io/otel/example/namedtracer => ../example/namedtracer - -replace go.opentelemetry.io/otel/example/opencensus => ../example/opencensus - -replace go.opentelemetry.io/otel/example/otel-collector => ../example/otel-collector - -replace go.opentelemetry.io/otel/example/prom-collector => ../example/prom-collector - -replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus - -replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin - -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus - -replace go.opentelemetry.io/otel/exporters/otlp => ../exporters/otlp - -replace go.opentelemetry.io/otel/exporters/stdout 
=> ../exporters/stdout - -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger - -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin - -replace go.opentelemetry.io/otel/internal/tools => ../internal/tools - -replace go.opentelemetry.io/otel/metric => ../metric - -replace go.opentelemetry.io/otel/oteltest => ../oteltest - -replace go.opentelemetry.io/otel/sdk => ../sdk - -replace go.opentelemetry.io/otel/sdk/export/metric => ../sdk/export/metric - -replace go.opentelemetry.io/otel/sdk/metric => ../sdk/metric - -replace go.opentelemetry.io/otel/trace => ./ - -require ( - github.com/google/go-cmp v0.5.5 - github.com/stretchr/testify v1.7.0 - go.opentelemetry.io/otel v0.20.0 -) diff --git a/vendor/go.opentelemetry.io/otel/trace/go.sum b/vendor/go.opentelemetry.io/otel/trace/go.sum deleted file mode 100644 index b69f2e56..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/go.sum +++ /dev/null @@ -1,15 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/go.uber.org/atomic/go.mod b/vendor/go.uber.org/atomic/go.mod deleted file mode 100644 index daa7599f..00000000 --- a/vendor/go.uber.org/atomic/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module go.uber.org/atomic - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/stretchr/testify v1.3.0 -) - -go 1.13 diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum deleted file mode 100644 index 4f898415..00000000 --- a/vendor/go.uber.org/atomic/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 
h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/go.uber.org/automaxprocs/.codecov.yml b/vendor/go.uber.org/automaxprocs/.codecov.yml new file mode 100644 index 00000000..9a2ed4a9 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.codecov.yml @@ -0,0 +1,14 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 90% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/vendor/go.uber.org/automaxprocs/.gitignore b/vendor/go.uber.org/automaxprocs/.gitignore new file mode 100644 index 00000000..dd7bcf51 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log +coverage.txt + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/automaxprocs/.travis.yml b/vendor/go.uber.org/automaxprocs/.travis.yml new file mode 100644 index 00000000..1464bd34 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.travis.yml @@ -0,0 +1,24 @@ +language: go +sudo: false +go_import_path: go.uber.org/automaxprocs + +env: + global: + - GO111MODULE=on + +matrix: + include: + - go: oldstable + - go: stable + env: LINT=1 + +install: + - make install + +script: + - test -z "$LINT" || make lint + - make test + +after_success: + - make cover + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/vendor/go.uber.org/automaxprocs/CHANGELOG.md new file mode 100644 index 00000000..d3729f68 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CHANGELOG.md @@ -0,0 +1,26 @@ +# Changelog + +## v1.4.0 (2021-02-01) + +- Support colons in cgroup names. +- Remove linters from runtime dependencies. + +## v1.3.0 (2020-01-23) + +- Migrate to Go modules. + +## v1.2.0 (2018-02-22) + +- Fixed quota clamping to always round down rather than up; Rather than + guaranteeing constant throttling at saturation, instead assume that the + fractional CPU was added as a hedge for factors outside of Go's scheduler. + +## v1.1.0 (2017-11-10) + +- Log the new value of `GOMAXPROCS` rather than the current value. +- Make logs more explicit about whether `GOMAXPROCS` was modified or not. +- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. + +## v1.0.0 (2017-08-09) + +- Initial release. 
diff --git a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..e327d9aa --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. 
+ +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md new file mode 100644 index 00000000..2b6a6040 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +We'd love your help improving this package! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/automaxprocs.git +cd automaxprocs +git remote add upstream https://github.com/uber-go/automaxprocs.git +git fetch upstream +``` + +Install the test dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/automaxprocs +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. + +[fork]: https://github.com/uber-go/automaxprocs/fork +[open-issue]: https://github.com/uber-go/automaxprocs/issues/new +[cla]: https://cla-assistant.io/uber-go/automaxprocs +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE new file mode 100644 index 00000000..20dcf51d --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/go.uber.org/automaxprocs/Makefile b/vendor/go.uber.org/automaxprocs/Makefile new file mode 100644 index 00000000..c5467720 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/Makefile @@ -0,0 +1,46 @@ +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck + +.PHONY: build +build: + go build ./... + +.PHONY: install +install: + go mod download + +.PHONY: test +test: + go test -race ./... + +.PHONY: cover +cover: + go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +$(GOLINT): tools/go.mod + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): tools/go.mod + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking gofmt" + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking go vet" + @go vet ./... 2>&1 | tee -a lint.log + @echo "Checking golint" + @$(GOLINT) ./... | tee -a lint.log + @echo "Checking staticcheck" + @$(STATICCHECK) ./... 2>&1 | tee -a lint.log + @echo "Checking for license headers..." + @./.build/check_license.sh | tee -a lint.log + @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/automaxprocs/README.md b/vendor/go.uber.org/automaxprocs/README.md new file mode 100644 index 00000000..7ad608c6 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/README.md @@ -0,0 +1,46 @@ +# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Automatically set `GOMAXPROCS` to match Linux container CPU quota. + +## Installation + +`go get -u go.uber.org/automaxprocs` + +## Quick Start + +```go +import _ "go.uber.org/automaxprocs" + +func main() { + // Your application logic here. +} +``` + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +automaxprocs to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). 
The automaxprocs maintainers keep +an eye on issues and pull requests, but you can also report any negative +conduct to oss-conduct@uber.com. That email list is a private, safe space; +even the automaxprocs maintainers don't have access, so don't hesitate to hold +us to a high standard. + +
+ +Released under the [MIT License](LICENSE). + +[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg +[doc]: https://godoc.org/go.uber.org/automaxprocs +[ci-img]: https://travis-ci.com/uber-go/automaxprocs.svg?branch=master +[ci]: https://travis-ci.com/uber-go/automaxprocs +[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/automaxprocs + + diff --git a/vendor/go.uber.org/automaxprocs/automaxprocs.go b/vendor/go.uber.org/automaxprocs/automaxprocs.go new file mode 100644 index 00000000..69946a3e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/automaxprocs.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package automaxprocs automatically sets GOMAXPROCS to match the Linux +// container CPU quota, if any. +package automaxprocs // import "go.uber.org/automaxprocs" + +import ( + "log" + + "go.uber.org/automaxprocs/maxprocs" +) + +func init() { + maxprocs.Set(maxprocs.Logger(log.Printf)) +} diff --git a/vendor/go.uber.org/automaxprocs/glide.yaml b/vendor/go.uber.org/automaxprocs/glide.yaml new file mode 100644 index 00000000..d49aa7ab --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/glide.yaml @@ -0,0 +1,7 @@ +package: go.uber.org/automaxprocs +import: [] +testImport: +- package: github.com/stretchr/testify + version: ^1.1.4 + subpackages: + - assert diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go new file mode 100644 index 00000000..1257d0c9 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go @@ -0,0 +1,78 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strconv" +) + +// CGroup represents the data structure for a Linux control group. +type CGroup struct { + path string +} + +// NewCGroup returns a new *CGroup from a given path. +func NewCGroup(path string) *CGroup { + return &CGroup{path: path} +} + +// Path returns the path of the CGroup*. +func (cg *CGroup) Path() string { + return cg.path +} + +// ParamPath returns the path of the given cgroup param under itself. +func (cg *CGroup) ParamPath(param string) string { + return filepath.Join(cg.path, param) +} + +// readFirstLine reads the first line from a cgroup param file. +func (cg *CGroup) readFirstLine(param string) (string, error) { + paramFile, err := os.Open(cg.ParamPath(param)) + if err != nil { + return "", err + } + defer paramFile.Close() + + scanner := bufio.NewScanner(paramFile) + if scanner.Scan() { + return scanner.Text(), nil + } + if err := scanner.Err(); err != nil { + return "", err + } + return "", io.ErrUnexpectedEOF +} + +// readInt parses the first line from a cgroup param file as int. +func (cg *CGroup) readInt(param string) (int, error) { + text, err := cg.readFirstLine(param) + if err != nil { + return 0, err + } + return strconv.Atoi(text) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go new file mode 100644 index 00000000..e2489b82 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go @@ -0,0 +1,117 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +const ( + // _cgroupFSType is the Linux CGroup file system type used in + // `/proc/$PID/mountinfo`. + _cgroupFSType = "cgroup" + // _cgroupSubsysCPU is the CPU CGroup subsystem. + _cgroupSubsysCPU = "cpu" + // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem. 
+ _cgroupSubsysCPUAcct = "cpuacct" + // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem. + _cgroupSubsysCPUSet = "cpuset" + // _cgroupSubsysMemory is the Memory CGroup subsystem. + _cgroupSubsysMemory = "memory" + + // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota + // parameter. + _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us" + // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period + // parameter. + _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us" +) + +const ( + _procPathCGroup = "/proc/self/cgroup" + _procPathMountInfo = "/proc/self/mountinfo" +) + +// CGroups is a map that associates each CGroup with its subsystem name. +type CGroups map[string]*CGroup + +// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files +// under for some process under `/proc` file system (see also proc(5) for more +// information). +func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) { + cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + cgroups := make(CGroups) + newMountPoint := func(mp *MountPoint) error { + if mp.FSType != _cgroupFSType { + return nil + } + + for _, opt := range mp.SuperOptions { + subsys, exists := cgroupSubsystems[opt] + if !exists { + continue + } + + cgroupPath, err := mp.Translate(subsys.Name) + if err != nil { + return err + } + cgroups[opt] = NewCGroup(cgroupPath) + } + + return nil + } + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return nil, err + } + return cgroups, nil +} + +// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current +// process. +func NewCGroupsForCurrentProcess() (CGroups, error) { + return NewCGroups(_procPathMountInfo, _procPathCGroup) +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup controller. +// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of +// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, nil)`. +func (cg CGroups) CPUQuota() (float64, bool, error) { + cpuCGroup, exists := cg[_cgroupSubsysCPU] + if !exists { + return -1, false, nil + } + + cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam) + if defined := cfsQuotaUs > 0; err != nil || !defined { + return -1, defined, err + } + + cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam) + if err != nil { + return -1, false, err + } + + return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go new file mode 100644 index 00000000..113555f6 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package cgroups provides utilities to access Linux control group (CGroups) +// parameters (CPU quota, for example) for a given process. +package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go new file mode 100644 index 00000000..bad8d7ae --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go @@ -0,0 +1,51 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import "fmt" + +type cgroupSubsysFormatInvalidError struct { + line string +} + +type mountPointFormatInvalidError struct { + line string +} + +type pathNotExposedFromMountPointError struct { + mountPoint string + root string + path string +} + +func (err cgroupSubsysFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line) +} + +func (err mountPointFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for MountPoint: %q", err.line) +} + +func (err pathNotExposedFromMountPointError) Error() string { + return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go new file mode 100644 index 00000000..d6238d5c --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go @@ -0,0 +1,166 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _mountInfoSep = " " + _mountInfoOptsSep = "," + _mountInfoOptionalFieldsSep = "-" +) + +const ( + _miFieldIDMountID = iota + _miFieldIDParentID + _miFieldIDDeviceID + _miFieldIDRoot + _miFieldIDMountPoint + _miFieldIDOptions + _miFieldIDOptionalFields + + _miFieldCountFirstHalf +) + +const ( + _miFieldOffsetFSType = iota + _miFieldOffsetMountSource + _miFieldOffsetSuperOptions + + _miFieldCountSecondHalf +) + +const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf + +// MountPoint is the data structure for the mount points in +// `/proc/$PID/mountinfo`. See also proc(5) for more information. +type MountPoint struct { + MountID int + ParentID int + DeviceID string + Root string + MountPoint string + Options []string + OptionalFields []string + FSType string + MountSource string + SuperOptions []string +} + +// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and +// returns a new *MountPoint. 
+func NewMountPointFromLine(line string) (*MountPoint, error) { + fields := strings.Split(line, _mountInfoSep) + + if len(fields) < _miFieldCountMin { + return nil, mountPointFormatInvalidError{line} + } + + mountID, err := strconv.Atoi(fields[_miFieldIDMountID]) + if err != nil { + return nil, err + } + + parentID, err := strconv.Atoi(fields[_miFieldIDParentID]) + if err != nil { + return nil, err + } + + for i, field := range fields[_miFieldIDOptionalFields:] { + if field == _mountInfoOptionalFieldsSep { + fsTypeStart := _miFieldIDOptionalFields + i + 1 + + if len(fields) != fsTypeStart+_miFieldCountSecondHalf { + return nil, mountPointFormatInvalidError{line} + } + + miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart + miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart + miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart + + return &MountPoint{ + MountID: mountID, + ParentID: parentID, + DeviceID: fields[_miFieldIDDeviceID], + Root: fields[_miFieldIDRoot], + MountPoint: fields[_miFieldIDMountPoint], + Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep), + OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)], + FSType: fields[miFieldIDFSType], + MountSource: fields[miFieldIDMountSource], + SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep), + }, nil + } + } + + return nil, mountPointFormatInvalidError{line} +} + +// Translate converts an absolute path inside the *MountPoint's file system to +// the host file system path in the mount namespace the *MountPoint belongs to. +func (mp *MountPoint) Translate(absPath string) (string, error) { + relPath, err := filepath.Rel(mp.Root, absPath) + + if err != nil { + return "", err + } + if relPath == ".." || strings.HasPrefix(relPath, "../") { + return "", pathNotExposedFromMountPointError{ + mountPoint: mp.MountPoint, + root: mp.Root, + path: absPath, + } + } + + return filepath.Join(mp.MountPoint, relPath), nil +} + +// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`) +// and yields parsed *MountPoint into newMountPoint. +func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error { + mountInfoFile, err := os.Open(procPathMountInfo) + if err != nil { + return err + } + defer mountInfoFile.Close() + + scanner := bufio.NewScanner(mountInfoFile) + + for scanner.Scan() { + mountPoint, err := NewMountPointFromLine(scanner.Text()) + if err != nil { + return err + } + if err := newMountPoint(mountPoint); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go new file mode 100644 index 00000000..103e86f3 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go @@ -0,0 +1,102 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +const ( + _cgroupSep = ":" + _cgroupSubsysSep = "," +) + +const ( + _csFieldIDID = iota + _csFieldIDSubsystems + _csFieldIDName + _csFieldCount +) + +// CGroupSubsys represents the data structure for entities in +// `/proc/$PID/cgroup`. See also proc(5) for more information. +type CGroupSubsys struct { + ID int + Subsystems []string + Name string +} + +// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in +// the format of `/proc/$PID/cgroup` +func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) { + fields := strings.SplitN(line, _cgroupSep, _csFieldCount) + + if len(fields) != _csFieldCount { + return nil, cgroupSubsysFormatInvalidError{line} + } + + id, err := strconv.Atoi(fields[_csFieldIDID]) + if err != nil { + return nil, err + } + + cgroup := &CGroupSubsys{ + ID: id, + Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep), + Name: fields[_csFieldIDName], + } + + return cgroup, nil +} + +// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`) +// and returns a new map[string]*CGroupSubsys. +func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) { + cgroupFile, err := os.Open(procPathCGroup) + if err != nil { + return nil, err + } + defer cgroupFile.Close() + + scanner := bufio.NewScanner(cgroupFile) + subsystems := make(map[string]*CGroupSubsys) + + for scanner.Scan() { + cgroup, err := NewCGroupSubsysFromLine(scanner.Text()) + if err != nil { + return nil, err + } + for _, subsys := range cgroup.Subsystems { + subsystems[subsys] = cgroup + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return subsystems, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go new file mode 100644 index 00000000..37699c31 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go @@ -0,0 +1,49 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
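In the same spirit, a short hypothetical sketch of how a single `/proc/$PID/cgroup` line maps onto `CGroupSubsys`; the hierarchy ID, controllers, and path below are invented:

```go
// +build linux

package cgroups

import "testing"

// TestCGroupSubsysSketch is a hypothetical illustration, not part of the vendored code.
func TestCGroupSubsysSketch(t *testing.T) {
	// cgroup v1 format: "<hierarchy-id>:<comma-separated controllers>:<path>".
	subsys, err := NewCGroupSubsysFromLine("8:cpu,cpuacct:/docker/1234abcd")
	if err != nil {
		t.Fatalf("parse: %v", err)
	}
	if subsys.ID != 8 || subsys.Name != "/docker/1234abcd" {
		t.Fatalf("unexpected result: %+v", subsys)
	}
	// parseCGroupSubsystems would index this entry under both "cpu" and "cpuacct".
	if len(subsys.Subsystems) != 2 {
		t.Fatalf("unexpected subsystems: %v", subsys.Subsystems)
	}
}
```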
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build linux + +package runtime + +import ( + "math" + + cg "go.uber.org/automaxprocs/internal/cgroups" +) + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. +func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) { + cgroups, err := cg.NewCGroupsForCurrentProcess() + if err != nil { + return -1, CPUQuotaUndefined, err + } + + quota, defined, err := cgroups.CPUQuota() + if !defined || err != nil { + return -1, CPUQuotaUndefined, err + } + + maxProcs := int(math.Floor(quota)) + if minValue > 0 && maxProcs < minValue { + return minValue, CPUQuotaMinUsed, nil + } + return maxProcs, CPUQuotaUsed, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go new file mode 100644 index 00000000..5915a2ef --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go @@ -0,0 +1,30 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build !linux + +package runtime + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the +// current OS. +func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) { + return -1, CPUQuotaUndefined, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go new file mode 100644 index 00000000..3a751564 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package runtime + +// CPUQuotaStatus presents the status of how CPU quota is used +type CPUQuotaStatus int + +const ( + // CPUQuotaUndefined is returned when CPU quota is undefined + CPUQuotaUndefined CPUQuotaStatus = iota + // CPUQuotaUsed is returned when a valid CPU quota can be used + CPUQuotaUsed + // CPUQuotaMinUsed is return when CPU quota is smaller than the min value + CPUQuotaMinUsed +) diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go new file mode 100644 index 00000000..ec032438 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go @@ -0,0 +1,130 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to +// match the configured Linux CPU quota. Unlike the top-level automaxprocs +// package, it lets the caller configure logging and handle errors. 
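As a point of reference, application code usually wires this package up with a single call at startup. The sketch below assumes the caller wants maxprocs' messages routed through the standard library logger and a floor of 2 procs; both choices are illustrative, not taken from this repository:

```go
package main

import (
	"log"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Set returns an undo func even when it errors, so deferring it is safe.
	undo, err := maxprocs.Set(maxprocs.Logger(log.Printf), maxprocs.Min(2))
	defer undo()
	if err != nil {
		log.Printf("failed to adjust GOMAXPROCS from the CPU quota: %v", err)
	}
	// ... start the service ...
}
```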
+package maxprocs // import "go.uber.org/automaxprocs/maxprocs" + +import ( + "os" + "runtime" + + iruntime "go.uber.org/automaxprocs/internal/runtime" +) + +const _maxProcsKey = "GOMAXPROCS" + +func currentMaxProcs() int { + return runtime.GOMAXPROCS(0) +} + +type config struct { + printf func(string, ...interface{}) + procs func(int) (int, iruntime.CPUQuotaStatus, error) + minGOMAXPROCS int +} + +func (c *config) log(fmt string, args ...interface{}) { + if c.printf != nil { + c.printf(fmt, args...) + } +} + +// An Option alters the behavior of Set. +type Option interface { + apply(*config) +} + +// Logger uses the supplied printf implementation for log output. By default, +// Set doesn't log anything. +func Logger(printf func(string, ...interface{})) Option { + return optionFunc(func(cfg *config) { + cfg.printf = printf + }) +} + +// Min sets the minimum GOMAXPROCS value that will be used. +// Any value below 1 is ignored. +func Min(n int) Option { + return optionFunc(func(cfg *config) { + if n >= 1 { + cfg.minGOMAXPROCS = n + } + }) +} + +type optionFunc func(*config) + +func (of optionFunc) apply(cfg *config) { of(cfg) } + +// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning +// any error encountered and an undo function. +// +// Set is a no-op on non-Linux systems and in Linux environments without a +// configured CPU quota. +func Set(opts ...Option) (func(), error) { + cfg := &config{ + procs: iruntime.CPUQuotaToGOMAXPROCS, + minGOMAXPROCS: 1, + } + for _, o := range opts { + o.apply(cfg) + } + + undoNoop := func() { + cfg.log("maxprocs: No GOMAXPROCS change to reset") + } + + // Honor the GOMAXPROCS environment variable if present. Otherwise, amend + // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is + // Linux, and guarantee a minimum value of 1. The minimum guaranteed value + // can be overriden using `maxprocs.Min()`. + if max, exists := os.LookupEnv(_maxProcsKey); exists { + cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) + return undoNoop, nil + } + + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS) + if err != nil { + return undoNoop, err + } + + if status == iruntime.CPUQuotaUndefined { + cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) + return undoNoop, nil + } + + prev := currentMaxProcs() + undo := func() { + cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) + runtime.GOMAXPROCS(prev) + } + + switch status { + case iruntime.CPUQuotaMinUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) + case iruntime.CPUQuotaUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) + } + + runtime.GOMAXPROCS(maxProcs) + return undo, nil +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go new file mode 100644 index 00000000..d9ad8f4c --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go @@ -0,0 +1,24 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package maxprocs + +// Version is the current package version. +const Version = "1.4.0" diff --git a/vendor/go.uber.org/multierr/go.mod b/vendor/go.uber.org/multierr/go.mod deleted file mode 100644 index 398d6c99..00000000 --- a/vendor/go.uber.org/multierr/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module go.uber.org/multierr - -go 1.14 - -require ( - github.com/stretchr/testify v1.7.0 - go.uber.org/atomic v1.7.0 - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect -) diff --git a/vendor/go.uber.org/multierr/go.sum b/vendor/go.uber.org/multierr/go.sum deleted file mode 100644 index 75edd735..00000000 --- a/vendor/go.uber.org/multierr/go.sum +++ /dev/null @@ -1,16 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 6c321012..794ee303 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,4 +1,28 @@ # Changelog +All notable changes to this project will be documented 
in this file. + +This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## 1.19.1 (8 Sep 2021) + +### Fixed +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. ## 1.18.1 (28 Jun 2021) diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod deleted file mode 100644 index 9455c99c..00000000 --- a/vendor/go.uber.org/zap/go.mod +++ /dev/null @@ -1,14 +0,0 @@ -module go.uber.org/zap - -go 1.13 - -require ( - github.com/benbjohnson/clock v1.1.0 - github.com/pkg/errors v0.8.1 - github.com/stretchr/testify v1.7.0 - go.uber.org/atomic v1.7.0 - go.uber.org/goleak v1.1.10 - go.uber.org/multierr v1.6.0 - gopkg.in/yaml.v2 v2.2.8 - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect -) diff --git a/vendor/go.uber.org/zap/go.sum b/vendor/go.uber.org/zap/go.sum deleted file mode 100644 index b330071a..00000000 --- a/vendor/go.uber.org/zap/go.sum +++ /dev/null @@ -1,54 +0,0 @@ -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= 
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go index 0c1436f7..ef2f7d96 100644 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -71,10 +71,10 @@ type BufferedWriteSyncer struct { // unexported fields for state mu sync.Mutex initialized bool // whether initialize() has run + stopped bool // whether Stop() has run writer *bufio.Writer ticker *time.Ticker stop chan struct{} // closed when flushLoop should stop - stopped 
bool // whether Stop() has run done chan struct{} // closed when flushLoop has stopped } diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 2d815feb..0885505b 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -219,11 +219,9 @@ func (ce *CheckedEntry) Write(fields ...Field) { for i := range ce.cores { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } - if ce.ErrorOutput != nil { - if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) - ce.ErrorOutput.Sync() - } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + ce.ErrorOutput.Sync() } should, msg := ce.should, ce.Message diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index f2a07d78..74919b0c 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -83,7 +83,7 @@ type errorGroup interface { Errors() []error } -// Note that errArry and errArrayElem are very similar to the version +// Note that errArray and errArrayElem are very similar to the version // implemented in the top-level error.go file. We can't re-use this because // that would require exporting errArray as part of the zapcore API. diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 5cf7d917..af220d9b 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -128,6 +128,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) { enc.AppendFloat64(val) } +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + func (enc *jsonEncoder) AddInt64(key string, val int64) { enc.addKey(key) enc.AppendInt64(val) @@ -228,7 +233,11 @@ func (enc *jsonEncoder) AppendComplex128(val complex128) { // Because we're always in a quoted string, we can use strconv without // special-casing NaN and +/-Inf. enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. 
+ if i >= 0 { + enc.buf.AppendByte('+') + } enc.buf.AppendFloat(i, 64) enc.buf.AppendByte('i') enc.buf.AppendByte('"') @@ -293,7 +302,6 @@ func (enc *jsonEncoder) AppendUint64(val uint64) { } func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index 25f10ca1..31ed96e1 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -197,12 +197,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { return ce } - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - s.hook(ent, LogDropped) - return ce + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) } - s.hook(ent, LogSampled) return s.Core.Check(ent, ce) } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go index fac0c0e2..c74fc20f 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !gc || purego || !s390x // +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go index 92b63a3c..0f4ae8ba 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 purego !gc +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go index 3e3e7600..248a3824 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && !purego && gc // +build amd64,!purego,gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 8f4d1877..4cfa5438 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
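To make the `AppendComplex128` change above concrete, a small hedged sketch of the expected field rendering (logger wiring and values are invented for illustration): with the patch, a negative imaginary part no longer picks up a stray `+`, so the field encodes as `"1.5-2.5i"` rather than `"1.5+-2.5i"`.

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	core := zapcore.NewCore(enc, zapcore.AddSync(os.Stdout), zapcore.InfoLevel)
	logger := zap.New(core)
	defer logger.Sync()

	// AppendComplex128 only emits '+' when the imaginary part is non-negative.
	logger.Info("complex fields",
		zap.Complex128("pos", complex(1.5, 2.5)),  // "pos":"1.5+2.5i"
		zap.Complex128("neg", complex(1.5, -2.5)), // "neg":"1.5-2.5i"
	)
}
```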
+//go:build amd64 && !purego && gc // +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go index 3cf6a22e..8b4453aa 100644 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.4 // +build go1.4 package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index 485e2d5f..4fcfc924 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s index e2df6412..a0e051b0 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go index 68148f26..5c0710ef 100644 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !gc || purego || !s390x // +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index ddafa826..59c8eb94 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (!amd64 && !386 && !ppc64le) || purego // +build !amd64,!386,!ppc64le purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go index fd35f02e..8d947711 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_generic.go +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go @@ -19,7 +19,7 @@ func xorInGeneric(d *state, buf []byte) { } } -// copyOutGeneric copies ulint64s to a byte buffer. +// copyOutGeneric copies uint64s to a byte buffer. func copyOutGeneric(d *state, b []byte) { for i := 0; len(b) >= 8; i++ { binary.LittleEndian.PutUint64(b, d.a[i]) diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go index 6249ad85..1ce60624 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build (amd64 || 386 || ppc64le) && !purego // +build amd64 386 ppc64le // +build !purego diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index f91466f7..038941d7 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -663,6 +663,24 @@ func inHeadIM(p *parser) bool { // Ignore the token. return true case a.Template: + // TODO: remove this divergence from the HTML5 spec. + // + // We don't handle all of the corner cases when mixing foreign + // content (i.e. <math> or <svg>) with