diff --git a/CONTRIBUTING.adoc b/CONTRIBUTING.adoc index e21ff06ab0cc..2726897c3085 100644 --- a/CONTRIBUTING.adoc +++ b/CONTRIBUTING.adoc @@ -35,7 +35,7 @@ Here's how to get set up: 1. For Go, Git and optionally also Docker, follow the links below to get to installation information for these tools: + ** http://golang.org/doc/install[Installing Go] ** http://git-scm.com/book/en/v2/Getting-Started-Installing-Git[Installing Git] -** https://docs.docker.com/installation/#installation[Installing Docker] +** https://docs.docker.com/installation/#installation[Installing Docker]. NOTE: OpenShift now requires at least Docker 1.6. RPMs for CentOS 7 are not yet available in the default yum repositories. If you're running CentOS, please see the link:README.md#docker-16[README] for information on where to get Docker 1.6 RPMs for your platform. 2. Next, create a Go workspace directory: + + ---- diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 7b17b13ca26e..ccf0e29df583 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -536,8 +536,8 @@ }, { "ImportPath": "github.com/docker/distribution", - "Comment": "v2.0.0-alpha.3-30-g06fcf05", - "Rev": "06fcf053e7b45aa8608c6bf32e8d3227b31d3747" + "Comment": "v2.0.0", + "Rev": "62b70f951f30a711a8a81df1865d0afeeaaa0169" }, { "ImportPath": "github.com/docker/docker/builder/command", @@ -640,6 +640,14 @@ "Comment": "0.2.1-433-g5a070ba", "Rev": "5a070ba03ad313e2f0183c9f9c369fd10161583a" }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, { "ImportPath": "github.com/getsentry/raven-go", "Rev": "86cd4063c535cbbcbf43d84424dbd5911ab1b818" @@ -783,6 +791,10 @@ "ImportPath": "github.com/inconshreveable/mousetrap", "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" }, + { + "ImportPath": "github.com/jlhawn/go-crypto", + "Rev": "cd738dde20f0b3782516181b0866c9bb9db47401" + }, { "ImportPath": "github.com/jonboulle/clockwork", "Rev": "72f9bd7c4e0c2a40055ab3d0f09654f730cce982" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.gitignore b/Godeps/_workspace/src/github.com/docker/distribution/.gitignore index 44777ca773c7..1c3ae0a773c7 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/.gitignore +++ b/Godeps/_workspace/src/github.com/docker/distribution/.gitignore @@ -31,3 +31,7 @@ bin/* # Cover profiles *.out + +# Editor/IDE specific files. 
+*.sublime-project +*.sublime-workspace diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap index 46317fbff0af..bcfe66352d5e 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap +++ b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap @@ -3,3 +3,4 @@ Stephen J Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland Josh Hawn Josh Hawn +Richard Scothern Richard \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS index 06209a860415..3e3dfa019c66 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS +++ b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS @@ -1,14 +1,27 @@ Ahmet Alp Balkan +Amy Lindburg Andrey Kostov +Andy Goldstein Anton Tiurin Arnaud Porterie +Ben Firshman Brian Bland +Daisuke Fujita David Lawrence Derek McGowan +Diogo Mónica Donald Huang Frederick F. Kautz IV +Jessie Frazelle Josh Hawn +Kenneth Lim +Mary Anthony +Nathan Sullivan +Nghia Tran Olivier Gambier +Richard Scothern +Shreyas Karnik +Simon Thulbourn Stephen J Day Tianon Gravi xiekeyang diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile index 881bb90ddce2..24d18722356a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile +++ b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile @@ -1,14 +1,12 @@ FROM golang:1.4 -ENV CONFIG_PATH /etc/docker/registry/config.yml ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR RUN make PREFIX=/go clean binaries -RUN mkdir -pv "$(dirname $CONFIG_PATH)" -RUN cp -lv ./cmd/registry/config.yml $CONFIG_PATH EXPOSE 5000 -CMD registry $CONFIG_PATH +ENTRYPOINT ["registry"] +CMD ["cmd/registry/config.yml"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json index bf79860c60c1..dc643683d3fb 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json +++ b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/docker/distribution", - "GoVersion": "go1.4.1", + "GoVersion": "go1.4.2", "Packages": [ "./..." 
], @@ -23,14 +23,14 @@ "Rev": "d3664b76d90508cdda5a6c92042f26eab5db3103" }, { - "ImportPath": "github.com/MSOpenTech/azure-sdk-for-go", - "Comment": "v1.2", - "Rev": "0fbd37144de3adc2aef74db867c0e15e41c7f74a" + "ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/storage", + "Comment": "v1.2-43-gd90753b", + "Rev": "d90753bcad2ed782fcead7392d1e831df29aa2bb" }, { "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.6.4-12-g467d9d5", - "Rev": "467d9d55c2d2c17248441a8fc661561161f40d5e" + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" }, { "ImportPath": "github.com/bugsnag/bugsnag-go", @@ -64,6 +64,14 @@ "ImportPath": "github.com/docker/libtrust", "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, { "ImportPath": "github.com/gorilla/context", "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" @@ -76,6 +84,10 @@ "ImportPath": "github.com/gorilla/mux", "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" }, + { + "ImportPath": "github.com/jlhawn/go-crypto", + "Rev": "cd738dde20f0b3782516181b0866c9bb9db47401" + }, { "ImportPath": "github.com/yvasiyarov/go-metrics", "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/Makefile index e75d375b8111..974d0191d96d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/Makefile +++ b/Godeps/_workspace/src/github.com/docker/distribution/Makefile @@ -2,7 +2,8 @@ PREFIX?=$(shell pwd) # Used to populate version variable in main package. -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version `git describe --match 'v[0-9]*' --dirty='.m' --always`" +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version $(VERSION)" .PHONY: clean all fmt vet lint build test binaries .DEFAULT: default @@ -27,7 +28,7 @@ ${PREFIX}/bin/dist: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ./cmd/dist -doc/spec/api.md: doc/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template +docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template ./bin/registry-api-descriptor-template $< > $@ vet: @@ -61,3 +62,20 @@ binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/registry-api-descriptor-template clean: @echo "+ $@" @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template" + + +# Use the existing docs build cmds from docker/docker +# Later, we will move this into an import +DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) +DOCSPORT := 8000 +DOCKER_DOCS_IMAGE := docker-docs-$(VERSION) +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE + +docs: docs-build + $(DOCKER_RUN_DOCS) -p $(DOCSPORT):8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve + +docs-shell: docs-build + $(DOCKER_RUN_DOCS) -p $(DOCSPORT):8000 "$(DOCKER_DOCS_IMAGE)" bash + +docs-build: + docker build -t "$(DOCKER_DOCS_IMAGE)" -f docs/Dockerfile . 
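As an aside on the `-X` flag wiring in the Makefile hunk above: the change factors the `git describe` output into a `VERSION` variable so one value can feed both the linker flag and the docs image tag. Below is a minimal sketch of the kind of `version` package such a flag targets — the variable name matches the `go list ./version` expression in `GO_LDFLAGS`, but the default value shown is an assumption for illustration, not the repository's exact file:

```go
// Package version is a sketch of the package that GO_LDFLAGS overwrites.
// Building via the Makefile effectively runs:
//   go build -ldflags "-X github.com/docker/distribution/version.Version <git describe output>"
// so Version only keeps the placeholder below in builds made without make.
package version

// Version is the build version, replaced at link time via -X.
var Version = "v2.0.0+unknown" // illustrative default
```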
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/README.md b/Godeps/_workspace/src/github.com/docker/distribution/README.md index f0bc54d0e9da..fcb470c857b2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/README.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/README.md @@ -1,47 +1,37 @@ -> **Notice:** *This repository hosts experimental components that are -> currently under heavy and fast-paced development, not-ready for public -> consumption. If you are looking for the stable registry, please head over to -> [docker/docker-registry](https://github.com/docker/docker-registry) -> instead.* - -Distribution -============ +# Distribution The Docker toolset to pack, ship, store, and deliver content. -The main product of this repository is the new registry implementation for -storing and distributing docker images. It supersedes the [docker/docker- +This repository's main product is the Docker Registry Service 2.0 implementation +for storing and distributing Docker images. It supersedes the [docker/docker- registry](https://github.com/docker/docker-registry) project with a new API design, focused around security and performance. -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with docker as they are without. - This repository contains the following components: -- **registry (beta):** An implementation of the [Docker Registry HTTP API - V2](doc/spec/api.md) for use with docker 1.5+. -- **libraries (unstable):** A rich set of libraries for interacting with - distribution components. Please see - [godoc](http://godoc.org/github.com/docker/distribution) for details. Note - that the libraries *are not* considered stable. -- **dist (experimental):** An experimental tool to provide distribution - oriented functionality without the docker daemon. -- **specifications**: _Distribution_ related specifications are available in - [doc/spec](doc/spec). -- **documentation:** Documentation is available in [doc](doc/overview.md). +|**Component** |Description | +|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **dist** | An _experimental_ tool to provide distribution-oriented functionality without the `docker` daemon. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/overview.md) related just to the registry. | -### How will this integrate with Docker engine? +### How does this integrate with Docker engine? -This project should provide an implementation to a V2 API for use in the -Docker core project. The API should be embeddable and simplify the process of -securely pulling and pushing content from docker daemons.
+This project should provide an implementation of a V2 API for use in the [Docker +core project](https://github.com/docker/docker). The API should be embeddable +and simplify the process of securely pulling and pushing content from `docker` +daemons. ### What are the long term goals of the Distribution project? -Design a professional grade and extensible content distribution system, that -allow users to: +The _Distribution_ project has the further long term goal of providing a +secure tool chain for distributing content. The specifications, APIs and tools +should be as useful with Docker as they are without. + +Our goal is to design a professional grade and extensible content distribution +system that allows users to: * Enjoy an efficient, secured and reliable way to store, manage, package and exchange content @@ -49,8 +39,7 @@ allow users to: * Implement their own home made solution through good specs, and solid extensions mechanism. -Features -------- +## More about Registry 2.0 The new registry implementation provides the following benefits: @@ -60,190 +49,69 @@ The new registry implementation provides the following benefits: - pluggable storage backend - webhook notifications -Installation ------------ +For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). -**TODO(stevvooe):** Add the following here: -- docker file -- binary builds for non-docker environment (test installations, etc.) - -Configuration ------------- - -The registry server can be configured with a yaml file. The following is a -simple example that can used for local development: - -```yaml -version: 0.1 -loglevel: debug -storage: - filesystem: - rootdirectory: /tmp/registry-dev -http: - addr: localhost:5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 -``` - -The above configures the registry instance to run on port 5000, binding to -"localhost", with the debug server enabled. Registry data will be stored in -"/tmp/registry-dev". Logging will be in "debug" mode, which is the most -verbose. - -A similar simple configuration is available at [cmd/registry/config.yml], -which is generally useful for local development. - -**TODO(stevvooe): Need a "best practice" configuration overview. Perhaps, we -can point to a documentation section. - -For full details about configuring a registry server, please see [the -documentation](doc/configuration.md). - -### Upgrading - -**TODO:** Add a section about upgrading from V1 registry along with link to -migrating in documentation. - -Build ----- - -If a go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - -```sh -go get github.com/docker/distribution/cmd/registry -``` - -The above will install the source repository into the `GOPATH`.
The `registry` -binary can then be run with the following: - -``` -$ $GOPATH/bin/registry -version -$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown -``` - -The registry can be run with the default config using the following -incantantation: - -``` -$ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config.yml -INFO[0000] endpoint local-8082 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] debug server listening localhost:5001 -``` - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - -``` -go get github.com/tools/godep github.com/golang/lint/golint -``` - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have -to think about how to interact with Godeps properly. - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - -``` -$ GOPATH=`godep path`:$GOPATH make -+ clean -+ fmt -+ vet -+ lint -+ build -github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar -github.com/Sirupsen/logrus -github.com/docker/libtrust -... -github.com/yvasiyarov/gorelic -github.com/docker/distribution/registry/handlers -github.com/docker/distribution/cmd/registry -+ test -... -ok github.com/docker/distribution/digest 7.875s -ok github.com/docker/distribution/manifest 0.028s -ok github.com/docker/distribution/notifications 17.322s -? github.com/docker/distribution/registry [no test files] -ok github.com/docker/distribution/registry/api/v2 0.101s -? github.com/docker/distribution/registry/auth [no test files] -ok github.com/docker/distribution/registry/auth/silly 0.011s -... -+ /Users/sday/go/src/github.com/docker/distribution/bin/registry -+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template -+ /Users/sday/go/src/github.com/docker/distribution/bin/dist -+ binaries -``` - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - -```sh -$ ./bin/registry -version -./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m -``` - -### Developing - -The above approaches are helpful for small experimentation. If more complex -tasks are at hand, it is recommended to employ the full power of `godep`. - -The Makefile is designed to have its `GOPATH` defined externally. This allows -one to experiment with various development environment setups. This is -primarily useful when testing upstream bugfixes, by modifying local code. This -can be demonstrated using `godep` to migrate the `GOPATH` to use the specified -dependencies. 
The `GOPATH` can be migrated to the current package versions -declared in `Godeps` with the following command: - -```sh -godep restore -``` - -> **WARNING:** This command will checkout versions of the code specified in -> Godeps/Godeps.json, modifying the contents of `GOPATH`. If this is -> undesired, it is recommended to create a workspace devoted to work on the -> _Distribution_ project. - -With a successful run of the above command, one can now use `make` without -specifying the `GOPATH`: - -```sh -$ make -``` - -If that is successful, standard `go` commands, such as `go test` should work, -per package, without issue. - -Support ------- +### Who needs to deploy a registry? -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: +By default, Docker users pull images from Docker's public registry instance. +[Installing Docker](http://docs.docker.com/installation) gives users this +ability. Users can also push images to a repository on Docker's public registry, +if they have a [Docker Hub](https://hub.docker.com/) account. -IRC: #docker-distribution on FreeNode -Issue Tracker: github.com/docker/distribution/issues -Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution -Mailing List: docker@dockerproject.org +For some users and even companies, this default behavior is sufficient. For +others, it is not. -Contribute ---------- +For example, users with their own software products may want to maintain a +registry for private, company images. Also, you may wish to deploy your own +image repository for images used in testing or continuous integration. For these +use cases and others, [deploying your own registry instance](docs/deploying.md) +may be the better choice. + +## Contribute Please see [CONTRIBUTING.md](CONTRIBUTING.md). -License ------- +## Support + +If any issues are encountered while using the _Distribution_ project, several +avenues are available for support: + +| Topic | Where | +|---------------|-------| +| IRC | #docker-distribution on FreeNode | +| Issue Tracker | github.com/docker/distribution/issues | +| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution | +| Mailing List | docker@dockerproject.org |
+ + +## License This project is distributed under [Apache License, Version 2.0](LICENSE.md). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml index bb3ade118455..5dd39cb3b7fd 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml @@ -1,6 +1,12 @@ version: 0.1 -loglevel: debug +log: + level: debug + fields: + service: registry + environment: development storage: + cache: + layerinfo: inmemory filesystem: rootdirectory: /tmp/registry-dev http: @@ -8,6 +14,15 @@ http: secret: asecretforlocaldevelopment debug: addr: localhost:5001 +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms notifications: endpoints: - name: local-8082 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go index fa5305ebc347..52eecf8f217a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go @@ -1,20 +1,27 @@ package main import ( + "crypto/tls" + "crypto/x509" _ "expvar" "flag" "fmt" + "io/ioutil" "net/http" _ "net/http/pprof" "os" + "time" log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/formatters/logstash" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" + _ "github.com/docker/distribution/health" _ "github.com/docker/distribution/registry/auth/silly" _ "github.com/docker/distribution/registry/auth/token" "github.com/docker/distribution/registry/handlers" + _ "github.com/docker/distribution/registry/storage/driver/azure" _ "github.com/docker/distribution/registry/storage/driver/filesystem" _ "github.com/docker/distribution/registry/storage/driver/inmemory" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" @@ -22,7 +29,6 @@ import ( "github.com/docker/distribution/version" gorhandlers "github.com/gorilla/handlers" "github.com/yvasiyarov/gorelic" - "golang.org/x/net/context" ) var showVersion bool @@ -41,15 +47,17 @@ func main() { } ctx := context.Background() + ctx = context.WithValue(ctx, "version", version.Version) config, err := resolveConfiguration() if err != nil { fatalf("configuration error: %v", err) } - log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithValue(ctx, "version", version.Version) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version")) + ctx, err = configureLogging(ctx, config) + if err != nil { + fatalf("error configuring logger: %v", err) + } app := handlers.NewApp(ctx, *config) handler := configureReporting(app) @@ -60,14 +68,46 @@ func main() { } if config.HTTP.TLS.Certificate == "" { - ctxu.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) + context.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { - ctxu.GetLogger(app).Fatalln(err) + context.GetLogger(app).Fatalln(err) } } else { - ctxu.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) - if err := http.ListenAndServeTLS(config.HTTP.Addr, config.HTTP.TLS.Certificate, config.HTTP.TLS.Key, handler); err != nil { - 
ctxu.GetLogger(app).Fatalln(err) + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + context.GetLogger(app).Fatalln(err) + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool")) + } + } + + for _, subj := range pool.Subjects() { + context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + context.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) + server := &http.Server{ + Addr: config.HTTP.Addr, + Handler: handler, + TLSConfig: tlsConf, + } + + if err := server.ListenAndServeTLS(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key); err != nil { + context.GetLogger(app).Fatalln(err) } } } @@ -109,16 +149,6 @@ func resolveConfiguration() (*configuration.Configuration, error) { return config, nil } -func logLevel(level configuration.Loglevel) log.Level { - l, err := log.ParseLevel(string(level)) - if err != nil { - log.Warnf("error parsing level %q: %v", level, err) - l = log.InfoLevel - } - - return l -} - func configureReporting(app *handlers.App) http.Handler { var handler http.Handler = app @@ -146,7 +176,7 @@ func configureReporting(app *handlers.App) http.Handler { agent.NewrelicName = app.Config.Reporting.NewRelic.Name } agent.CollectHTTPStat = true - agent.Verbose = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose agent.Run() handler = agent.WrapHTTPHandler(handler) @@ -155,6 +185,74 @@ func configureReporting(app *handlers.App) http.Handler { return handler } +// configureLogging prepares the context with a logger using the +// configuration. +func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { + if config.Log.Level == "" && config.Log.Formatter == "" { + // If no config for logging is set, fallback to deprecated "Loglevel". + log.SetLevel(logLevel(config.Loglevel)) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) + return ctx, nil + } + + log.SetLevel(logLevel(config.Log.Level)) + + formatter := config.Log.Formatter + if formatter == "" { + formatter = "text" // default formatter + } + + switch formatter { + case "json": + log.SetFormatter(&log.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "text": + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "logstash": + log.SetFormatter(&logstash.LogstashFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + default: + // just let the library use default on empty string. + if config.Log.Formatter != "" { + return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) + } + } + + if config.Log.Formatter != "" { + log.Debugf("using %q logging formatter", config.Log.Formatter) + } + + // log the application version with messages + ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) + + if len(config.Log.Fields) > 0 { + // build up the static fields, if present. 
+ var fields []interface{} + for k := range config.Log.Fields { + fields = append(fields, k) + } + + ctx = context.WithValues(ctx, config.Log.Fields) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) + } + + return ctx, nil +} + +func logLevel(level configuration.Loglevel) log.Level { + l, err := log.ParseLevel(string(level)) + if err != nil { + l = log.InfoLevel + log.Warnf("error parsing level %q: %v, using %q ", level, err, l) + } + + return l +} + // debugServer starts the debug server with pprof, expvar among other // endpoints. The addr should not be exposed externally. For most of these to // work, tls cannot be enabled on the endpoint, so it is generally separate. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/README.md b/Godeps/_workspace/src/github.com/docker/distribution/configuration/README.md index 2a1279083f41..69cb39e56cec 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/README.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/README.md @@ -22,7 +22,7 @@ storage: s3: region: us-east-1 bucket: my-bucket - rootpath: /registry + rootdirectory: /registry encrypt: true secure: false accesskey: SAMPLEACCESSKEY diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go index 6c03b27fad1b..3d302e1cced5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go @@ -16,8 +16,24 @@ type Configuration struct { // Version is the version which defines the format of the rest of the configuration Version Version `yaml:"version"` - // Loglevel is the level at which registry operations are logged - Loglevel Loglevel `yaml:"loglevel"` + // Log supports setting various parameters related to the logging + // subsystem. + Log struct { + // Level is the granularity at which registry operations are logged. + Level Loglevel `yaml:"level"` + + // Formatter overrides the default formatter with another. Options + // include "text", "json" and "logstash". + Formatter string `yaml:"formatter,omitempty"` + + // Fields allows users to specify static string fields to include in + // the logger context. + Fields map[string]interface{} `yaml:"fields,omitempty"` + } + + // Loglevel is the level at which registry operations are logged. This is + // deprecated. Please use Log.Level in the future. + Loglevel Loglevel `yaml:"loglevel,omitempty"` // Storage is the configuration for the registry's storage driver Storage Storage `yaml:"storage"` @@ -57,6 +73,10 @@ type Configuration struct { // contain the private portion for the file specified in // Certificate. Key string `yaml:"key,omitempty"` + + // Specifies the CA certs for client authentication + // A file may contain multiple CA certificates encoded as PEM + ClientCAs []string `yaml:"clientcas,omitempty"` } `yaml:"tls,omitempty"` // Debug configures the http debug interface, if specified. This can @@ -71,6 +91,36 @@ type Configuration struct { // Notifications specifies configuration about various endpoints to which // registry events are dispatched. Notifications Notifications `yaml:"notifications,omitempty"` + + // Redis configures the redis pool available to the registry webapp. + Redis struct { + // Addr specifies the redis instance available to the application.
+ Addr string `yaml:"addr,omitempty"` + + // Password string to use when making a connection. + Password string `yaml:"password,omitempty"` + + // DB specifies the database to connect to on the redis instance. + DB int `yaml:"db,omitempty"` + + DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect + ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data + WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data + + // Pool configures the behavior of the redis connection pool. + Pool struct { + // MaxIdle sets the maximum number of idle connections. + MaxIdle int `yaml:"maxidle,omitempty"` + + // MaxActive sets the maximum number of connections that should be + // opened before blocking a connection request. + MaxActive int `yaml:"maxactive,omitempty"` + + // IdleTimeout sets the amount of time to wait before closing + // inactive connections. + IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` + } `yaml:"pool,omitempty"` + } `yaml:"redis,omitempty"` } // v0_1Configuration is a Version 0.1 Configuration struct @@ -137,7 +187,12 @@ type Storage map[string]Parameters func (storage Storage) Type() string { // Return only key in this map for k := range storage { - return k + switch k { + case "cache": + // allow configuration of caching + default: + return k + } } return "" } @@ -161,9 +216,17 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { if len(storageMap) > 1 { types := make([]string, 0, len(storageMap)) for k := range storageMap { - types = append(types, k) + switch k { + case "cache": + // allow configuration of caching + default: + types = append(types, k) + } + } + + if len(types) > 1 { + return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) } - return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) } *storage = storageMap return nil @@ -293,6 +356,8 @@ type NewRelicReporting struct { LicenseKey string `yaml:"licensekey,omitempty"` // Name is the component name of the registry in NewRelic Name string `yaml:"name,omitempty"` + // Verbose configures debug output to STDOUT + Verbose bool `yaml:"verbose,omitempty"` } // Middleware configures named middlewares to be applied at injection points.
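The `Redis` struct above maps almost one-to-one onto the options of the vendored redigo client. Below is a hedged sketch of how such settings could be turned into a connection pool; `redisConfig` is a hypothetical flattened stand-in for `Configuration.Redis`, and the AUTH/SELECT handshake illustrates what a consumer of this configuration might do rather than the registry's actual startup code.

```go
package main

import (
	"time"

	"github.com/garyburd/redigo/redis"
)

// redisConfig is a hypothetical flattened stand-in for Configuration.Redis.
type redisConfig struct {
	Addr         string
	Password     string
	DB           int
	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration
	MaxIdle      int
	MaxActive    int
	IdleTimeout  time.Duration
}

// newRedisPool maps the configuration onto a redigo connection pool.
func newRedisPool(cfg redisConfig) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     cfg.MaxIdle,
		MaxActive:   cfg.MaxActive,
		IdleTimeout: cfg.IdleTimeout,
		Dial: func() (redis.Conn, error) {
			// DialTimeout covers connect; the read/write timeouts apply
			// per operation on the returned connection.
			conn, err := redis.DialTimeout("tcp", cfg.Addr,
				cfg.DialTimeout, cfg.ReadTimeout, cfg.WriteTimeout)
			if err != nil {
				return nil, err
			}
			// Authenticate and select the configured DB, when set.
			if cfg.Password != "" {
				if _, err := conn.Do("AUTH", cfg.Password); err != nil {
					conn.Close()
					return nil, err
				}
			}
			if _, err := conn.Do("SELECT", cfg.DB); err != nil {
				conn.Close()
				return nil, err
			}
			return conn, nil
		},
	}
}

func main() {
	// Values mirror the sample cmd/registry/config.yml in this diff.
	pool := newRedisPool(redisConfig{
		Addr:        "localhost:6379",
		MaxIdle:     16,
		MaxActive:   64,
		IdleTimeout: 300 * time.Second,
		DialTimeout: 10 * time.Millisecond,
		ReadTimeout: 10 * time.Millisecond,
		WriteTimeout: 10 * time.Millisecond,
	})
	defer pool.Close()
}
```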
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go index 5a6abf90ecc7..5c5d68b3e834 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go @@ -15,19 +15,26 @@ func Test(t *testing.T) { TestingT(t) } // configStruct is a canonical example configuration, which should map to configYamlV0_1 var configStruct = Configuration{ - Version: "0.1", + Version: "0.1", + Log: struct { + Level Loglevel `yaml:"level"` + Formatter string `yaml:"formatter,omitempty"` + Fields map[string]interface{} `yaml:"fields,omitempty"` + }{ + Fields: map[string]interface{}{"environment": "test"}, + }, Loglevel: "info", Storage: Storage{ "s3": Parameters{ - "region": "us-east-1", - "bucket": "my-bucket", - "rootpath": "/registry", - "encrypt": true, - "secure": false, - "accesskey": "SAMPLEACCESSKEY", - "secretkey": "SUPERSECRET", - "host": nil, - "port": 42, + "region": "us-east-1", + "bucket": "my-bucket", + "rootdirectory": "/registry", + "encrypt": true, + "secure": false, + "accesskey": "SAMPLEACCESSKEY", + "secretkey": "SUPERSECRET", + "host": nil, + "port": 42, }, }, Auth: Auth{ @@ -52,17 +59,41 @@ var configStruct = Configuration{ }, }, }, + HTTP: struct { + Addr string `yaml:"addr,omitempty"` + Prefix string `yaml:"prefix,omitempty"` + Secret string `yaml:"secret,omitempty"` + TLS struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + } `yaml:"tls,omitempty"` + Debug struct { + Addr string `yaml:"addr,omitempty"` + } `yaml:"debug,omitempty"` + }{ + TLS: struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + }{ + ClientCAs: []string{"/path/to/ca.pem"}, + }, + }, } // configYamlV0_1 is a Version 0.1 yaml document representing configStruct var configYamlV0_1 = ` version: 0.1 +log: + fields: + environment: test loglevel: info storage: s3: region: us-east-1 bucket: my-bucket - rootpath: /registry + rootdirectory: /registry encrypt: true secure: false accesskey: SAMPLEACCESSKEY @@ -82,6 +113,9 @@ notifications: reporting: bugsnag: apikey: BugsnagApiKey +http: + clientcas: + - /path/to/ca.pem ` // inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory @@ -136,6 +170,7 @@ func (suite *ConfigSuite) TestParseSimple(c *C) { func (suite *ConfigSuite) TestParseInmemory(c *C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Log.Fields = nil config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) c.Assert(err, IsNil) @@ -150,6 +185,7 @@ func (suite *ConfigSuite) TestParseIncomplete(c *C) { _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) c.Assert(err, NotNil) + suite.expectedConfig.Log.Fields = nil suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} suite.expectedConfig.Reporting = Reporting{} @@ -303,13 +339,19 @@ func copyConfig(config Configuration) *Configuration { configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) configCopy.Loglevel = config.Loglevel + configCopy.Log = 
config.Log + configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) + for k, v := range config.Log.Fields { + configCopy.Log.Fields[k] = v + } + configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} for k, v := range config.Storage.Parameters() { configCopy.Storage.setParameter(k, v) } configCopy.Reporting = Reporting{ Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, - NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name}, + NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, } configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go new file mode 100644 index 000000000000..45a35ad176e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go @@ -0,0 +1,76 @@ +package context + +import ( + "code.google.com/p/go-uuid/uuid" + "golang.org/x/net/context" +) + +// Context is a copy of Context from the golang.org/x/net/context package. +type Context interface { + context.Context +} + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + Context + id string // id of context, logged as "instance.id" +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), + id: uuid.New(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() Context { + return background +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. Use context Values only for request-scoped data that transits processes +// and APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. 
+ for k, v := range m { + mo[k] = v + } + + return stringMapContext{ + Context: ctx, + m: mo, + } +} + +func (smc stringMapContext) Value(key interface{}) interface{} { + if ks, ok := key.(string); ok { + if v, ok := smc.m[ks]; ok { + return v + } + } + + return smc.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go index 357f0dc3275e..98ab436d97ad 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go @@ -2,27 +2,73 @@ package context import ( "errors" + "net" "net/http" "strings" "sync" "time" "code.google.com/p/go-uuid/uuid" + log "github.com/Sirupsen/logrus" "github.com/gorilla/mux" - "golang.org/x/net/context" ) // Common errors used with this package. var ( - ErrNoRequestContext = errors.New("no http request in context") + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") ) +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + // WithRequest places the request on the context. The context of the request // is assigned a unique id, available at "http.request.id". The request itself // is available at "http.request". Other common attributes are available under // the prefix "http.request.". If a request is already present on the context, // this method will panic. -func WithRequest(ctx context.Context, r *http.Request) context.Context { +func WithRequest(ctx Context, r *http.Request) Context { if ctx.Value("http.request") != nil { // NOTE(stevvooe): This needs to be considered a programming error. It // is unlikely that we'd want to have more than one request in @@ -41,7 +87,7 @@ func WithRequest(ctx context.Context, r *http.Request) context.Context { // GetRequest returns the http request in the given context. Returns // ErrNoRequestContext if the context does not have an http request associated // with it. -func GetRequest(ctx context.Context) (*http.Request, error) { +func GetRequest(ctx Context) (*http.Request, error) { if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { return r, nil } @@ -50,13 +96,13 @@ } // GetRequestID attempts to resolve the current request id, if possible. An // error is returned if it is not available on the context.
-func GetRequestID(ctx context.Context) string { +func GetRequestID(ctx Context) string { return GetStringValue(ctx, "http.request.id") } // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. -func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) { +func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { irw := &instrumentedResponseWriter{ ResponseWriter: w, Context: ctx, @@ -65,6 +111,20 @@ return irw, irw } +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + // getVarsFromRequest lets us change request vars implementation for testing // and maybe future changes. var getVarsFromRequest = mux.Vars @@ -74,7 +134,7 @@ var getVarsFromRequest = mux.Vars // example, if looking for the variable "name", it can be accessed as // "vars.name". Implementations that are accessing values need not know that // the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx context.Context, r *http.Request) context.Context { +func WithVars(ctx Context, r *http.Request) Context { return &muxVarsContext{ Context: ctx, vars: getVarsFromRequest(r), @@ -84,7 +144,7 @@ // GetRequestLogger returns a logger that contains fields from the request in // the current context. If the request is not available in the context, no // fields will display. Request loggers can safely be pushed onto the context. -func GetRequestLogger(ctx context.Context) Logger { +func GetRequestLogger(ctx Context) Logger { return GetLogger(ctx, "http.request.id", "http.request.method", @@ -100,7 +160,7 @@ // Because the values are read at call time, pushing a logger returned from // this function on the context will lead to missing or invalid data. Only // call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx context.Context) Logger { +func GetResponseLogger(ctx Context) Logger { l := getLogrusLogger(ctx, "http.response.written", "http.response.status", @@ -109,7 +169,7 @@ duration := Since(ctx, "http.request.startedat") if duration > 0 { - l = l.WithField("http.response.duration", duration) + l = l.WithField("http.response.duration", duration.String()) } return l @@ -117,7 +177,7 @@ // httpRequestContext makes information about a request available to context.
type httpRequestContext struct { - context.Context + Context startedAt time.Time id string @@ -147,7 +207,7 @@ func (ctx *httpRequestContext) Value(key interface{}) interface{} { case "uri": return ctx.r.RequestURI case "remoteaddr": - return ctx.r.RemoteAddr + return RemoteAddr(ctx.r) case "method": return ctx.r.Method case "host": @@ -176,7 +236,7 @@ fallback: } type muxVarsContext struct { - context.Context + Context vars map[string]string } @@ -202,7 +262,7 @@ func (ctx *muxVarsContext) Value(key interface{}) interface{} { // context. type instrumentedResponseWriter struct { http.ResponseWriter - context.Context + Context mu sync.Mutex status int diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go index df3734e8631c..42c78b750697 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go @@ -2,11 +2,12 @@ package context import ( "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" "reflect" "testing" "time" - - "golang.org/x/net/context" ) func TestWithRequest(t *testing.T) { @@ -20,7 +21,7 @@ func TestWithRequest(t *testing.T) { req.Header.Set("Referer", "foo.com/referer") req.Header.Set("User-Agent", "test/0.1") - ctx := WithRequest(context.Background(), &req) + ctx := WithRequest(Background(), &req) for _, testcase := range []struct { key string expected interface{} @@ -129,7 +130,7 @@ func (trw *testResponseWriter) Flush() { func TestWithResponseWriter(t *testing.T) { trw := testResponseWriter{} - ctx, rw := WithResponseWriter(context.Background(), &trw) + ctx, rw := WithResponseWriter(Background(), &trw) if ctx.Value("http.response") != &trw { t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), &trw) @@ -180,7 +181,7 @@ func TestWithVars(t *testing.T) { return vars } - ctx := WithVars(context.Background(), &req) + ctx := WithVars(Background(), &req) for _, testcase := range []struct { key string expected interface{} @@ -205,3 +206,67 @@ } } } + +// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test +// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten +// at the transport layer to 127.0.0.1:<port>. However, as the X-Forwarded-For header +// just contains the IP address, it is different enough for testing.
+func TestRemoteAddr(t *testing.T) { + var expectedRemote string + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if r.RemoteAddr == expectedRemote { + t.Errorf("Unexpected matching remote addresses") + } + + actualRemote := RemoteAddr(r) + if expectedRemote != actualRemote { + t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) + } + + w.WriteHeader(200) + })) + + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxy := httputil.NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxy) + defer frontend.Close() + + // X-Forwarded-For set by proxy + expectedRemote = "127.0.0.1" + proxyReq, err := http.NewRequest("GET", frontend.URL, nil) + if err != nil { + t.Fatal(err) + } + + _, err = http.DefaultClient.Do(proxyReq) + if err != nil { + t.Fatal(err) + } + + // RemoteAddr in X-Real-Ip + getReq, err := http.NewRequest("GET", backend.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedRemote = "1.2.3.4" + getReq.Header["X-Real-ip"] = []string{expectedRemote} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + + // Valid X-Real-Ip and invalid X-Forwarded-For + getReq.Header["X-forwarded-for"] = []string{"1.2.3"} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go index bec8fade49dc..78e4212a0b67 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/Sirupsen/logrus" - "golang.org/x/net/context" ) // Logger provides a leveled-logging interface. @@ -41,8 +40,22 @@ type Logger interface { } // WithLogger creates a new context with provided logger. -func WithLogger(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, "logger", logger) +func WithLogger(ctx Context, logger Logger) Context { + return WithValue(ctx, "logger", logger) +} + +// GetLoggerWithField returns a logger instance with the specified field key +// and value without affecting the context. Extra specified keys will be +// resolved from the context. +func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields(ctx Context, fields map[string]interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithFields(logrus.Fields(fields)) } // GetLogger returns the logger from the current context, if present. If one @@ -51,7 +64,7 @@ // argument passed to GetLogger will be passed to fmt.Sprint when expanded as // a logging key field. If context keys are integer constants, for example, // it's recommended that a String method is implemented. -func GetLogger(ctx context.Context, keys ...interface{}) Logger { +func GetLogger(ctx Context, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...)
} @@ -59,7 +72,7 @@ func GetLogger(ctx context.Context, keys ...interface{}) Logger { // are provided, they will be resolved on the context and included in the // logger. Only use this function if specific logrus functionality is // required. -func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry { +func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { var logger *logrus.Entry // Get a logger, if it is present. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go new file mode 100644 index 000000000000..1115fc1f65c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go @@ -0,0 +1,99 @@ +package context + +import ( + "runtime" + "time" + + "code.google.com/p/go-uuid/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapsed time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.New(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, "trace.duration", "trace.id", "trace.parent.id", + "trace.func", "trace.file", "trace.line"). + Infof(format, a...) // info may be too chatty. + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent.
+ } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go new file mode 100644 index 000000000000..4b969fbb0d00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go @@ -0,0 +1,85 @@ +package context + +import ( + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. +func TestWithTrace(t *testing.T) { + pc, file, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + defer done("this will be emitted at end of test") + + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + })) + + traced := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc, _, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + ctx, done := WithTrace(ctx) + defer done("this should be subordinate to the other trace") + time.Sleep(time.Second) + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + }, valueTestCase{ + key: "trace.parent.id", + expected: parentID, + })) + } + traced() + + time.Sleep(time.Second) +} + +type valueTestCase struct { + key string + expected interface{} + notnilorempty bool // just check not empty/not nil +} + +func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { + + for _, testcase := range values { + v := ctx.Value(testcase.key) + if testcase.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) + } + continue + } + + if v != testcase.expected { + t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go index 7202c160d908..c0aff00d283f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go @@ -2,14 +2,12 @@ package context import ( "time" - - "golang.org/x/net/context" ) // Since looks up key, which should be a time.Time, and returns the duration // since that time. If the key is not found, the value returned will be zero. // This is helpful when inferring metrics related to context execution times. -func Since(ctx context.Context, key interface{}) time.Duration { +func Since(ctx Context, key interface{}) time.Duration { startedAtI := ctx.Value(key) if startedAtI != nil { if startedAt, ok := startedAtI.(time.Time); ok { @@ -22,7 +20,7 @@ func Since(ctx context.Context, key interface{}) time.Duration { // GetStringValue returns a string value from the context. The empty string // will be returned if not found. 
-func GetStringValue(ctx context.Context, key string) (value string) { +func GetStringValue(ctx Context, key string) (value string) { stringi := ctx.Value(key) if stringi != nil { if valuev, ok := stringi.(string); ok { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go index d640026cb802..ba9731fbbba7 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go @@ -2,7 +2,6 @@ package digest import ( "bytes" - "crypto/sha256" "fmt" "hash" "io" @@ -72,13 +71,13 @@ func ParseDigest(s string) (Digest, error) { // FromReader returns the most valid digest for the underlying content. func FromReader(rd io.Reader) (Digest, error) { - h := sha256.New() + digester := NewCanonicalDigester() - if _, err := io.Copy(h, rd); err != nil { + if _, err := io.Copy(digester, rd); err != nil { return "", err } - return NewDigest("sha256", h), nil + return digester.Digest(), nil } // FromTarArchive produces a tarsum digest from reader rd. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go index 9094d662e49e..d5fc5443f492 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go @@ -9,8 +9,8 @@ import ( // equivalent to hash.Hash but provides methods for returning the Digest type // rather than raw bytes. type Digester struct { - alg string - hash hash.Hash + alg string + hash.Hash } // NewDigester create a new Digester with the given hashing algorithm and instance @@ -18,27 +18,37 @@ type Digester struct { func NewDigester(alg string, h hash.Hash) Digester { return Digester{ alg: alg, - hash: h, + Hash: h, } } // NewCanonicalDigester is a convenience function to create a new Digester with -// out default settings. +// our default settings. func NewCanonicalDigester() Digester { return NewDigester("sha256", sha256.New()) } -// Write data to the digester. These writes cannot fail. -func (d *Digester) Write(p []byte) (n int, err error) { - return d.hash.Write(p) -} - // Digest returns the current digest for this digester. func (d *Digester) Digest() Digest { - return NewDigest(d.alg, d.hash) + return NewDigest(d.alg, d.Hash) +} + +// ResumableHash is the common interface implemented by all resumable hash +// functions. +type ResumableHash interface { + // ResumableHash is a superset of hash.Hash + hash.Hash + // Len returns the number of bytes written to the Hash so far. + Len() uint64 + // State returns a snapshot of the state of the Hash. + State() ([]byte, error) + // Restore resets the Hash to the given state. + Restore(state []byte) error } -// Reset the state of the digester. -func (d *Digester) Reset() { - d.hash.Reset() +// ResumableDigester is a digester that can export its internal state and be +// restored from saved state. 
+type ResumableDigester interface { + ResumableHash + Digest() Digest } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable.go new file mode 100644 index 000000000000..f2403f61d28a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable.go @@ -0,0 +1,52 @@ +// +build !noresumabledigest + +package digest + +import ( + "fmt" + + "github.com/jlhawn/go-crypto" + // For ResumableHash + _ "github.com/jlhawn/go-crypto/sha256" // For Resumable SHA256 + _ "github.com/jlhawn/go-crypto/sha512" // For Resumable SHA384, SHA512 +) + +// resumableDigester implements ResumableDigester. +type resumableDigester struct { + alg string + crypto.ResumableHash +} + +var resumableHashAlgs = map[string]crypto.Hash{ + "sha256": crypto.SHA256, + "sha384": crypto.SHA384, + "sha512": crypto.SHA512, +} + +// NewResumableDigester creates a new ResumableDigester with the given hashing +// algorithm. +func NewResumableDigester(alg string) (ResumableDigester, error) { + hash, supported := resumableHashAlgs[alg] + if !supported { + return resumableDigester{}, fmt.Errorf("unsupported resumable hash algorithm: %s", alg) + } + + return resumableDigester{ + alg: alg, + ResumableHash: hash.New(), + }, nil +} + +// NewCanonicalResumableDigester creates a ResumableDigester using the default +// digest algorithm. +func NewCanonicalResumableDigester() ResumableDigester { + return resumableDigester{ + alg: "sha256", + ResumableHash: crypto.SHA256.New(), + } +} + +// Digest returns the current digest for this resumable digester. +func (d resumableDigester) Digest() Digest { + return NewDigest(d.alg, d.ResumableHash) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/architecture.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/architecture.md deleted file mode 100644 index d8b96e8dada1..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/doc/architecture.md +++ /dev/null @@ -1,4 +0,0 @@ -# Architecture - -**TODO(stevvooe):** Discuss the architecture of the registry, internally and -externally, in a few different deployment scenarios. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/configuration.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/configuration.md deleted file mode 100644 index f9cbf1976528..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/doc/configuration.md +++ /dev/null @@ -1,314 +0,0 @@ -# Configuration - -Below is a comprehensive example of all possible configuration options for the registry. Some options are mutually exclusive, and each section is explained in more detail below, but this is a good starting point from which you may delete the sections you do not need to create your own configuration. A copy of this configuration can be found at config.sample.yml. 
-
-```yaml
-version: 0.1
-loglevel: debug
-storage:
-  filesystem:
-    rootdirectory: /tmp/registry
-  azure:
-    accountname: accountname
-    accountkey: base64encodedaccountkey
-    container: containername
-  s3:
-    accesskey: awsaccesskey
-    secretkey: awssecretkey
-    region: us-west-1
-    bucket: bucketname
-    encrypt: true
-    secure: true
-    v4auth: true
-    chunksize: 32000
-    rootdirectory: /s3/object/name/prefix
-auth:
-  silly:
-    realm: silly-realm
-    service: silly-service
-  token:
-    realm: token-realm
-    service: token-service
-    issuer: registry-token-issuer
-    rootcertbundle: /root/certs/bundle
-middleware:
-  registry:
-    - name: ARegistryMiddleware
-      options:
-        foo: bar
-  repository:
-    - name: ARepositoryMiddleware
-      options:
-        foo: bar
-  storage:
-    - name: cloudfront
-      options:
-        baseurl: https://my.cloudfronted.domain.com/
-        privatekey: /path/to/pem
-        keypairid: cloudfrontkeypairid
-        duration: 3000
-reporting:
-  bugsnag:
-    apikey: bugsnagapikey
-    releasestage: bugsnagreleasestage
-    endpoint: bugsnagendpoint
-  newrelic:
-    licensekey: newreliclicensekey
-    name: newrelicname
-http:
-  addr: localhost:5000
-  prefix: /my/nested/registry/
-  secret: asecretforlocaldevelopment
-  tls:
-    certificate: /path/to/x509/public
-    key: /path/to/x509/private
-  debug:
-    addr: localhost:5001
-notifications:
-  endpoints:
-    - name: alistener
-      disabled: false
-      url: https://my.listener.com/event
-      headers:
-      timeout: 500
-      threshold: 5
-      backoff: 1000
-```
-
-N.B. In some instances a configuration option may be marked **optional** but contain child options marked as **required**. This indicates that a parent may be omitted with all its children; however, if the parent is included, the children marked **required** must be included.
-
-## version
-
-```yaml
-version: 0.1
-```
-
-The version option is **required** and indicates the version of the configuration being used. It is expected to remain a top-level field, to allow for a consistent version check before parsing the remainder of the configuration file.
-
-N.B. The version of the registry software may be found at [/version/version.go](https://github.com/docker/distribution/blob/master/version/version.go)
-
-## loglevel
-
-```yaml
-loglevel: debug
-```
-
-The loglevel option is **required** and sets the sensitivity of logging output. Permitted values are:
-
-- ```error```
-- ```warn```
-- ```info```
-- ```debug```
-
-## storage
-
-```yaml
-storage:
-  filesystem:
-    rootdirectory: /tmp/registry
-  azure:
-    accountname: accountname
-    accountkey: base64encodedaccountkey
-    container: containername
-  s3:
-    accesskey: awsaccesskey
-    secretkey: awssecretkey
-    region: us-west-1
-    bucket: bucketname
-    encrypt: true
-    secure: true
-    v4auth: true
-    chunksize: 32000
-    rootdirectory: /s3/object/name/prefix
-```
-
-The storage option is **required** and defines which storage backend is in use. At the moment only one backend may be configured; an error is returned when the registry is started with more than one storage backend configured.
-
-The following backends may be configured; **all options for a given storage backend are required**:
-
-### filesystem
-
-This storage backend uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications.
-
-- rootdirectory: **Required** - This is the absolute path to the directory in which the repository will store data.
-
-### azure
-
-This storage backend uses Microsoft's Azure Storage platform.
-
-- accountname: **Required** - Azure account name
-- accountkey: **Required** - Azure account key
-- container: **Required** - Name of the Azure container into which data will be stored
-
-### S3
-
-This storage backend uses Amazon's Simple Storage Service (a.k.a. S3).
-
-- accesskey: **Required** - Your AWS Access Key
-- secretkey: **Required** - Your AWS Secret Key.
-- region: **Required** - The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS-based bucket routing.
-- bucket: **Required** - The bucket name in which you want to store the registry's data.
-- encrypt: TODO: fill in description
-- secure: TODO: fill in description
-- v4auth: This indicates whether Version 4 of AWS's authentication should be used. Generally you will want to set this to true.
-- chunksize: TODO: fill in description
-- rootdirectory: **Optional** - This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary.
-
-## auth
-
-```yaml
-auth:
-  silly:
-    realm: silly-realm
-    service: silly-service
-  token:
-    realm: token-realm
-    service: token-service
-    issuer: registry-token-issuer
-    rootcertbundle: /root/certs/bundle
-```
-
-The auth option is **optional**, as there are use cases (e.g. a mirror that only permits pulls) for which authentication may not be desired. There are currently two possible auth providers, "silly" and "token"; only one auth provider may be configured at the moment:
-
-### silly
-
-The "silly" auth is only for development purposes. It simply checks for the existence of the "Authorization" header in the HTTP request, with no regard for the value of the header. If the header does not exist, it will respond with a challenge response, echoing back the realm, service, and scope for which access was denied.
-
-The values of the ```realm``` and ```service``` options are used in authentication responses; both options are **required**.
-
-- realm: **Required** - The realm in which the registry server authenticates.
-- service: **Required** - The service being authenticated.
-
-### token
-
-Token-based authentication allows the authentication system to be decoupled from the registry. It is a well-established authentication paradigm with a high degree of security.
-
-- realm: **Required** - The realm in which the registry server authenticates.
-- service: **Required** - The service being authenticated.
-- issuer: **Required** - The name of the token issuer. The issuer inserts this into the token, so it must match the value configured for the issuer.
-- rootcertbundle: **Required** - The absolute path to the root certificate bundle containing the public part of the certificates that will be used to sign authentication tokens.
-
-For more information about token-based authentication configuration, see the [specification.](spec/auth/token.md)
-
-## middleware
-
-The middleware option is **optional** and allows middlewares to be injected at named hook points. A requirement of all middlewares is that they implement the same interface as the object they're wrapping. This means a registry middleware must implement the `distribution.Registry` interface, repository middleware must implement `distribution.Repository`, and storage middleware must implement `driver.StorageDriver`.
-
-Currently only one middleware, cloudfront, a storage middleware, is included in the registry.
-
-```yaml
-middleware:
-  registry:
-    - name: ARegistryMiddleware
-      options:
-        foo: bar
-  repository:
-    - name: ARepositoryMiddleware
-      options:
-        foo: bar
-  storage:
-    - name: cloudfront
-      options:
-        baseurl: https://my.cloudfronted.domain.com/
-        privatekey: /path/to/pem
-        keypairid: cloudfrontkeypairid
-        duration: 3000
-```
-
-Each middleware entry has `name` and `options` entries. The `name` must correspond to the name under which the middleware registers itself. The `options` field is a map that details custom configuration required to initialize the middleware. It is treated as a map[string]interface{} and as such will support any interesting structures desired, leaving it up to the middleware initialization function to best determine how to handle the specific interpretation of the options.
-
-### cloudfront
-
-- baseurl: **Required** - SCHEME://HOST[/PATH] at which Cloudfront is served.
-- privatekey: **Required** - Private key for Cloudfront, provided by AWS.
-- keypairid: **Required** - Key pair ID provided by AWS.
-- duration: **Optional** - Duration for which a signed URL should be valid.
-
-## reporting
-
-```yaml
-reporting:
-  bugsnag:
-    apikey: bugsnagapikey
-    releasestage: bugsnagreleasestage
-    endpoint: bugsnagendpoint
-  newrelic:
-    licensekey: newreliclicensekey
-    name: newrelicname
-```
-
-The reporting option is **optional** and configures error and metrics reporting tools. At the moment only two services are supported, New Relic and Bugsnag; a valid configuration may contain both.
-
-### bugsnag
-
-- apikey: **Required** - API key provided by Bugsnag
-- releasestage: **Optional** - TODO: fill in description
-- endpoint: **Optional** - TODO: fill in description
-
-### newrelic
-
-- licensekey: **Required** - License key provided by New Relic
-- name: **Optional** - New Relic application name
-
-## http
-
-```yaml
-http:
-  addr: localhost:5000
-  prefix: /my/nested/registry/
-  secret: asecretforlocaldevelopment
-  tls:
-    certificate: /path/to/x509/public
-    key: /path/to/x509/private
-  debug:
-    addr: localhost:5001
-```
-
-The http option details the configuration for the HTTP server that hosts the registry.
-
-- addr: **Required** - The HOST:PORT on which the server should accept connections.
-- prefix: **Optional** - If the server will not run at the root path, this should specify the prefix (the part of the path before ```v2```). It should have both preceding and trailing slashes.
-- secret: A random piece of data. It is used to sign state that may be stored with the client to protect against tampering. For production use you should generate a random piece of data using a cryptographically secure random generator.
-
-### tls
-
-The tls option within http is **optional** and allows you to configure SSL for the server. If you already have a server such as Nginx or Apache running on the same host as the registry, you may prefer to configure SSL termination there and proxy connections to the registry server.
-
-- certificate: **Required** - Absolute path to the x509 certificate file
-- key: **Required** - Absolute path to the x509 private key file
-
-### debug
-
-The debug option is **optional** and allows you to configure a debug server that can be helpful in diagnosing problems. It is of most use to contributors to the distribution repository and should generally be disabled in production deployments.
-
-- addr: **Required** - The HOST:PORT on which the debug server should accept connections.
- - -## notifications - -```yaml -notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 -``` - -The notifications option is **optional** and currently may contain a single option, ```endpoints```. - -### endpoints - -Endpoints is a list of named services (URLs) that can accept event notifications. - -- name: **Required** - A human readable name for the service. -- disabled: **Optional** - A boolean to enable/disable notifications for a service. -- url: **Required** - The URL to which events should be published. -- headers: **Required** - TODO: fill in description -- timeout: **Required** - TODO: fill in description -- threshold: **Required** - TODO: fill in description -- backoff: **Required** - TODO: fill in description - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/deploying.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/deploying.md deleted file mode 100644 index f30e32eb76d4..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/doc/deploying.md +++ /dev/null @@ -1,6 +0,0 @@ -# Deploying - -**TODO(stevvooe):** This should discuss various deployment scenarios for -production-ready deployments. These may be backed by ready-made docker images -but this should explain how they were created and what considerations were -present. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/glossary.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/glossary.md deleted file mode 100644 index 68fef2154c20..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/doc/glossary.md +++ /dev/null @@ -1,39 +0,0 @@ -# Glossary - -**TODO(stevvooe):** Define and describe distribution related terms. Ideally, -we reference back to the actual documentation and specifications where -appropriate. - -**TODO(stevvooe):** The following list is a start but woefully incomplete. - -
-<dl>
-	<dt>Blob</dt>
-	<dd>The primary unit of registry storage. A string of bytes identified by content-address, known as a _digest_.</dd>
-
-	<dt>Image</dt>
-	<dd>An image is a collection of content from which a docker container can be created.</dd>
-
-	<dt>Layer</dt>
-	<dd>A tar file representing the partial content of a filesystem. Several layers can be "stacked" to make up the root filesystem.</dd>
-
-	<dt>Manifest</dt>
-	<dd>Describes a collection of layers that make up an image.</dd>
-
-	<dt>Registry</dt>
-	<dd>A registry is a collection of repositories.</dd>
-
-	<dt>Repository</dt>
-	<dd>A repository is a collection of docker images, made up of manifests, tags and layers. The base unit of these components is the blob.</dd>
-
-	<dt>Tag</dt>
-	<dd>A tag provides a common name to an image.</dd>
-</dl>
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/notifications.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/notifications.md
deleted file mode 100644
index 5dbd8f347cad..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/notifications.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Notifications
-
-**TODO(stevvooe):** Cover use and deployment of webhook notifications. Link to description in architecture documentation.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/overview.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/overview.md
deleted file mode 100644
index 65cf56b7f218..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/overview.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Overview
-
-**TODO(stevvooe):** Table of contents.
-
-**TODO(stevvooe):** Include a full overview of each component and dispatch the user to the correct documentation.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/api.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/api.md
deleted file mode 100644
index 1f525f89bca0..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/api.md
+++ /dev/null
@@ -1,2665 +0,0 @@
-# Docker Registry HTTP API V2
-
-## Introduction
-
-The _Docker Registry HTTP API_ is the protocol to facilitate distribution of images to the docker engine. It interacts with instances of the docker registry, which is a service to manage information about docker images and enable their distribution. The specification covers the operation of version 2 of this API, known as _Docker Registry HTTP API V2_.
-
-While the V1 registry protocol is usable, there are several problems with the architecture that have led to this new version. The main driver of this specification is a set of changes to the docker image format, covered in docker/docker#8093. The new, self-contained image manifest simplifies image definition and improves security. This specification will build on that work, leveraging new properties of the manifest format to improve performance, reduce bandwidth usage and decrease the likelihood of backend corruption.
-
-For relevant details and history leading up to this specification, please see the following issues:
-
-- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
-- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
-- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)
-
-### Scope
-
-This specification covers the URL layout and protocols of the interaction between the docker registry and docker core. This will affect the docker core registry API and the rewrite of docker-registry. Docker registry implementations may implement other API endpoints, but they are not covered by this specification.
-
-This includes the following features:
-
-- Namespace-oriented URI Layout
-- PUSH/PULL registry server for V2 image manifest format
-- Resumable layer PUSH support
-- V2 Client library implementation
-
-While authentication and authorization support will influence this specification, details of the protocol will be left to a future specification. Relevant header definitions and error codes are present to provide an indication of what a client may encounter.
-
-#### Future
-
-There are features that have been discussed during the process of cutting this specification. The following is an incomplete list:
-
-- Immutable image references
-- Multiple architecture support
-- Migration from v2compatibility representation
-
-These may represent features that are either out of the scope of this specification, within the purview of another specification, or deferred to a future version.
-
-### Use Cases
-
-For the most part, the use cases of the former registry API apply to the new version. Differentiating use cases are covered below.
-
-#### Image Verification
-
-A docker engine instance would like to run a verified image named "library/ubuntu", with the tag "latest". The engine contacts the registry, requesting the manifest for "library/ubuntu:latest". An untrusted registry returns a manifest. Before proceeding to download the individual layers, the engine verifies the manifest's signature, ensuring that the content was produced from a trusted source and no tampering has occurred. After each layer is downloaded, the engine verifies the digest of the layer, ensuring that the content matches that specified by the manifest.
-
-#### Resumable Push
-
-Company X's build servers lose connectivity to the docker registry before completing an image layer transfer. After connectivity returns, the build server attempts to re-upload the image. The registry notifies the build server that the upload has already been partially attempted. The build server responds by only sending the remaining data to complete the image file.
-
-#### Resumable Pull
-
-Company X is having more connectivity problems, but this time in their deployment datacenter. When downloading an image, the connection is interrupted before completion. The client keeps the partial data and uses http `Range` requests to avoid downloading repeated data.
-
-#### Layer Upload De-duplication
-
-Company Y's build system creates two identical docker layers from build processes A and B. Build process A completes uploading the layer before B. When process B attempts to upload the layer, the registry indicates that it's not necessary because the layer is already known.
-
-If processes A and B upload the same layer at the same time, both operations will proceed and the first to complete will be stored in the registry (Note: we may modify this to prevent dogpiling with some locking mechanism).
-
-### Changes
-
-The V2 specification has been written to work as a living document, specifying only what is certain and leaving what is not specified open or to future changes. Only non-conflicting additions should be made to the API, and accepted changes should avoid preventing future changes from happening.
-
-This section should be updated when changes are made to the specification, indicating what is different. Optionally, we may start marking parts of the specification to correspond with the versions enumerated here.
-<dl>
-	<dt>2.0.1</dt>
-	<dd>
-		<ul>
-			<li>Added support for immutable manifest references in manifest endpoints.</li>
-			<li>Deleting a manifest by tag has been deprecated.</li>
-			<li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
-			<li>Added error code for unsupported operations.</li>
-		</ul>
-	</dd>
-
-	<dt>2.0</dt>
-	<dd>This is the baseline specification.</dd>
-</dl>
- -## Overview - -This section covers client flows and details of the API endpoints. The URI -layout of the new API is structured to support a rich authentication and -authorization model by leveraging namespaces. All endpoints will be prefixed -by the API version and the repository name: - - /v2// - -For example, an API endpoint that will work with the `library/ubuntu` -repository, the URI prefix will be: - - /v2/library/ubuntu/ - -This scheme provides rich access control over various operations and methods -using the URI prefix and http methods that can be controlled in variety of -ways. - -Classically, repository names have always been two path components where each -path component is less than 30 characters. The V2 registry API does not -enforce this. The rules for a repository name are as follows: - -1. A repository name is broken up into _path components_. A component of a - repository name must be at least two lowercase, alpha-numeric characters, - optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*` and the - matched result must be 2 or more characters in length. -2. The name of a repository must have at least two path components, separated - by a forward slash. -3. The total length of a repository name, including slashes, must be less the - 256 characters. - -These name requirements _only_ apply to the registry API and should accept a -superset of what is supported by other docker ecosystem components. - -All endpoints should support aggressive http caching, compression and range -headers, where appropriate. The new API attempts to leverage HTTP semantics -where possible but may break from standards to implement targeted features. - -For detail on individual endpoints, please see the [_Detail_](#detail) -section. - -### Errors - -Actionable failure conditions, covered in detail in their relevant sections, -are reported as part of 4xx responses, in a json response body. One or more -errors will be returned in the following format: - - { - "errors:" [{ - "code": , - "message": , - "detail": - }, - ... - ] - } - -The `code` field will be a unique identifier, all caps with underscores by -convention. The `message` field will be a human readable string. The optional -`detail` field may contain arbitrary json data providing information the -client can use to resolve the issue. - -While the client can take action on certain error codes, the registry may add -new error codes over time. All client implementations should treat unknown -error codes as `UNKNOWN`, allowing future error codes to be added without -breaking API compatibility. For the purposes of the specification error codes -will only be added and never removed. - -For a complete account of all error codes, please see the _Detail_ section. - -### API Version Check - -A minimal endpoint, mounted at `/v2/` will provide version support information -based on its response statuses. The request format is as follows: - - GET /v2/ - -If a `200 OK` response is returned, the registry implements the V2(.1) -registry API and the client may proceed safely with other V2 operations. -Optionally, the response may contain information about the supported paths in -the response body. The client should be prepared to ignore this data. - -If a `401 Unauthorized` response is returned, the client should take action -based on the contents of the "WWW-Authenticate" header and try the endpoint -again. 
Depending on access control setup, the client may still have to authenticate against different resources, even if this check succeeds.
-
-If a `404 Not Found` response status, or other unexpected status, is returned, the client should proceed with the assumption that the registry does not implement V2 of the API.
-
-### Pulling An Image
-
-An "image" is a combination of a JSON manifest and individual layer files. The process of pulling an image centers around retrieving these two components.
-
-The first step in pulling an image is to retrieve the manifest. For reference, the relevant manifest fields for the registry are the following:
-
-|field|description|
-|-----|-----------|
-|name|The name of the image.|
-|tag|The tag for this version of the image.|
-|fsLayers|A list of layer descriptors (including tarsum)|
-|signature|A JWS used to verify the manifest content|
-
-For more information about the manifest format, please see [docker/docker#8093](https://github.com/docker/docker/issues/8093).
-
-When the manifest is in hand, the client must verify the signature to ensure the names and layers are valid. Once confirmed, the client will then use the tarsums to download the individual layers. Layers are stored as blobs in the V2 registry API, keyed by their tarsum digest.
-
-#### Pulling an Image Manifest
-
-The image manifest can be fetched with the following url:
-
-```
-GET /v2/<name>/manifests/<reference>
-```
-
-The `name` and `reference` parameters identify the image and are required. The reference may include a tag or digest.
-
-A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful, the image manifest will be returned, with the following format (see docker/docker#8093 for details):
-
-    {
-       "name": <name>,
-       "tag": <tag>,
-       "fsLayers": [
-          {
-             "blobSum": <tarsum>
-          },
-          ...
-       ],
-       "history": <v1 images>,
-       "signature": <JWS>
-    }
-
-The client should verify the returned manifest signature for authenticity before fetching layers.
-
-#### Pulling a Layer
-
-Layers are stored in the blob portion of the registry, keyed by tarsum digest. Pulling a layer is carried out by a standard http request. The URL is as follows:
-
-```
-GET /v2/<name>/blobs/<tarsum>
-```
-
-Access to a layer will be gated by the `name` of the repository but is identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an opaque field, to be interpreted by the tarsum library.
-
-This endpoint may issue a 307 (302 for <1.7 versions of Docker) redirect to another service for downloading the layer, and clients should be prepared to handle redirects.
-
-This endpoint should support aggressive HTTP caching for image layers. Support for Etags, modification dates and other cache control headers should be included. To allow for incremental downloads, `Range` requests should be supported, as well.
-
-#### Pushing An Image
-
-Pushing an image works in the opposite order as a pull. After assembling the image manifest, the client must first push the individual layers. When the layers are fully pushed into the registry, the client should upload the signed manifest.
-
-The details of each step of the process are covered in the following sections.
-
-##### Pushing a Layer
-
-All layer uploads use two steps to manage the upload process. The first step starts the upload in the registry service, returning a url to carry out the second step. The second step uses the upload url to transfer the actual data. Uploads are started with a POST request against the upload endpoint:
-
-```
-POST /v2/<name>/blobs/uploads/
-```
-
-The parameters of this request are the image namespace under which the layer will be linked. Responses to this request are covered below.
-
-##### Existing Layers
-
-The existence of a layer can be checked via a `HEAD` request to the blob store API. The request should be formatted as follows:
-
-```
-HEAD /v2/<name>/blobs/<digest>
-```
-
-If the layer with the tarsum specified in `digest` is available, a 200 OK response will be received, with no actual body content (this is according to http specification). The response will look as follows:
-
-```
-200 OK
-Content-Length: <length of blob>
-Docker-Content-Digest: <digest>
-```
-
-When this response is received, the client can assume that the layer is already available in the registry under the given name and should take no further action to upload the layer. Note that the binary digests may differ for the existing registry layer, but the tarsums will be guaranteed to match.
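This check maps directly onto a few lines of client code. Below is a minimal Go sketch of the existence probe, assuming only the endpoint behavior described above; the registry address, repository name and tarsum are illustrative placeholders:

```go
package main

import (
	"fmt"
	"net/http"
)

// layerExists issues the HEAD request described above. A 200 means the layer
// is already present and the client should skip the upload; a 404 means the
// layer must be pushed.
func layerExists(registry, name, digest string) (bool, error) {
	resp, err := http.Head(fmt.Sprintf("%s/v2/%s/blobs/%s", registry, name, digest))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		// The canonical digest may differ from the tarsum used for lookup.
		fmt.Println("canonical digest:", resp.Header.Get("Docker-Content-Digest"))
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("unexpected status checking layer: %s", resp.Status)
	}
}

func main() {
	exists, err := layerExists("http://localhost:5000", "library/ubuntu",
		"tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b")
	if err != nil {
		panic(err)
	}
	fmt.Println("layer already present:", exists)
}
```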
-
-##### Uploading the Layer
-
-If the POST request is successful, a `202 Accepted` response will be returned with the upload URL in the `Location` header:
-
-```
-202 Accepted
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: bytes=0-<offset>
-Content-Length: 0
-Docker-Upload-UUID: <uuid>
-```
-
-The rest of the upload process can be carried out with the returned url, called the "Upload URL", from the `Location` header. All responses to the upload url, whether sending data or getting status, will be in this format. Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location` header is specified, clients should treat it as an opaque url and should never try to assemble it. While the `uuid` parameter may be an actual UUID, this proposal imposes no constraints on the format and clients should never impose any.
-
-If clients need to correlate local upload state with remote upload state, the contents of the `Docker-Upload-UUID` header should be used. Such an id can be used to key the last used location header when implementing resumable uploads.
-
-##### Upload Progress
-
-The progress and chunk coordination of the upload process will be coordinated through the `Range` header. While this is a non-standard use of the `Range` header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. For an upload that just started, for an example with a 1000 byte layer file, the `Range` header would be as follows:
-
-```
-Range: bytes=0-0
-```
-
-To get the status of an upload, issue a GET request to the upload URL:
-
-```
-GET /v2/<name>/blobs/uploads/<uuid>
-Host: <registry host>
-```
-
-The response will be similar to the above, except it will return a 204 status:
-
-```
-204 No Content
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: bytes=0-<offset>
-Docker-Upload-UUID: <uuid>
-```
-
-Note that the HTTP `Range` header byte ranges are inclusive and that will be honored, even in non-standard use cases.
-
-##### Monolithic Upload
-
-A monolithic upload is simply a chunked upload with a single chunk and may be favored by clients that would like to avoid the complexity of chunking. To carry out a "monolithic" upload, one can simply put the entire content blob to the provided URL:
-
-```
-PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
-Content-Length: <size of layer>
-Content-Type: application/octet-stream
-
-<Layer Binary Data>
-```
-
-The "digest" parameter must be included with the PUT request. Please see the _Completed Upload_ section for details on the parameters and expected responses.
-
-Additionally, the upload can be completed with a single `POST` request to the uploads endpoint, including the "size" and "digest" parameters:
-
-```
-POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
-Content-Length: <size of layer>
-Content-Type: application/octet-stream
-
-<Layer Binary Data>
-```
-
-On the registry service, this should allocate an upload, accept and verify the data, and return the same response as the final chunk of an upload. If the POST request fails collecting the data in any way, the registry should attempt to return an error response to the client with the `Location` header providing a place to continue the upload.
-
-The single `POST` method is provided for convenience and most clients should implement `POST` + `PUT` to support reliable resume of uploads.
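To make the two-step flow concrete, here is a hedged Go sketch of a monolithic push: a `POST` to obtain the upload URL, followed by a single `PUT` carrying the blob. This is an illustration under the protocol described above, not the docker client's actual implementation; the registry address, repository name and content are placeholders, and the `Location` header is treated as opaque, as required:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/digest"
)

// pushBlobMonolithic starts an upload and completes it with one PUT.
func pushBlobMonolithic(registry, name string, dgst digest.Digest, blob []byte) error {
	// Step 1: POST to the uploads endpoint; a 202 carries the upload URL.
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("unexpected status starting upload: %s", resp.Status)
	}

	// Step 2: append only the required digest parameter to the opaque
	// upload URL, resolving it against the registry base in case it is
	// returned as a relative path.
	base, err := url.Parse(registry)
	if err != nil {
		return err
	}
	loc, err := url.Parse(resp.Header.Get("Location"))
	if err != nil {
		return err
	}
	uploadURL := base.ResolveReference(loc)
	q := uploadURL.Query()
	q.Set("digest", fmt.Sprintf("%s", dgst))
	uploadURL.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", uploadURL.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	putResp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer putResp.Body.Close()
	if putResp.StatusCode != http.StatusCreated {
		return fmt.Errorf("upload not accepted: %s", putResp.Status)
	}
	return nil
}

func main() {
	blob := []byte("example layer content")
	dgst, err := digest.FromReader(bytes.NewReader(blob))
	if err != nil {
		panic(err)
	}
	if err := pushBlobMonolithic("http://localhost:5000", "library/ubuntu", dgst, blob); err != nil {
		fmt.Println("push failed:", err)
	}
}
```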
- -##### Chunked Upload - -To carry out an upload of a chunk, the client can specify a range header and -only include that part of the layer file: - -``` -PATCH /v2//blobs/uploads/ -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -There is no enforcement on layer chunk splits other than that the server must -receive them in order. The server may enforce a minimum chunk size. If the -server cannot accept the chunk, a `416 Requested Range Not Satisfiable` -response will be returned and will include a `Range` header indicating the -current status: - -``` -416 Requested Range Not Satisfiable -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -If this response is received, the client should resume from the "last valid -range" and upload the subsequent chunk. A 416 will be returned under the -following conditions: - -- Invalid Content-Range header format -- Out of order chunk: the range of the next chunk must start immediately after - the "last valid range" from the previous response. - -When a chunk is accepted as part of the upload, a `202 Accepted` response will -be returned, including a `Range` header with the current upload status: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -##### Completed Upload - -For an upload to be considered complete, the client must submit a `PUT` -request on the upload endpoint with a digest parameter. If it is not provided, -the download will not be considered complete. The format for the final chunk -will be as follows: - -``` -PUT /v2//blob/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validated -the upload. Multiple "digest" parameters may be provided with different -digests. The server may verify none or all of them but _must_ notify the -client if the content is rejected. - -When the last chunk is received and the layer has been validated, the client -will receive a `201 Created` response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data. - -###### Digest Parameter - -The "digest" parameter is designed as an opaque parameter to support -verification of a successful transfer. The initial version of the registry API -will support a tarsum digest, in the standard tarsum format. For example, a -HTTP URI parameter might be as follows: - -``` -tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Given this parameter, the registry will verify that the provided content does -result in this tarsum. Optionally, the registry can support other other digest -parameters for non-tarfile content stored as a layer. A regular hash digest -might be specified as follows: - -``` -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Such a parameter would be used to verify that the binary content (as opposed -to the tar content) would be verified at the end of the upload process. 
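For the plain-hash case, the vendored digest package changed earlier in this diff can produce such a parameter; `FromReader` (shown above in digest.go) hashes content with the canonical sha256 algorithm. A small sketch with illustrative content:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	// FromReader returns a digest in the algorithm-prefixed form,
	// e.g. "sha256:<hex>", suitable for use as a "digest" query parameter.
	dgst, err := digest.FromReader(strings.NewReader("example blob content"))
	if err != nil {
		panic(err)
	}
	fmt.Println(dgst)
}
```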
- -For the initial version, registry servers are only required to support the -tarsum format. - -##### Canceling an Upload - -An upload can be cancelled by issuing a DELETE request to the upload endpoint. -The format will be as follows: - -``` -DELETE /v2//blobs/uploads/ -``` - -After this request is issued, the upload uuid will no longer be valid and the -registry server will dump all intermediate data. While uploads will time out -if not completed, clients should issue this request if they encounter a fatal -error but still have the ability to issue an http request. - -##### Errors - -If an 502, 503 or 504 error is received, the client should assume that the -download can proceed due to a temporary condition, honoring the appropriate -retry mechanism. Other 5xx errors should be treated as terminal. - -If there is a problem with the upload, a 4xx error will be returned indicating -the problem. After receiving a 4xx response (except 416, as called out above), -the upload will be considered failed and the client should take appropriate -action. - -Note that the upload url will not be available forever. If the upload uuid is -unknown to the registry, a `404 Not Found` response will be returned and the -client must restart the upload process. - -#### Pushing an Image Manifest - -Once all of the layers for an image are uploaded, the client can upload the -image manifest. An image can be pushed using the following request format: - - PUT /v2//manifests/ - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": , - ... - } - -The `name` and `reference` fields of the response body must match those specified in -the URL. The `reference` field may be a "tag" or a "digest". - -If there is a problem with pushing the manifest, a relevant 4xx response will -be returned with a JSON error message. Please see the _PUT Manifest section -for details on possible error codes that may be returned. - -If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are -returned. The `detail` field of the error response will have a `digest` field -identifying the missing blob, which will be a tarsum. An error is returned for -each unknown blob. The response format is as follows: - - { - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": - } - }, - ... - ] - } - -#### Listing Image Tags - -It may be necessary to list all of the tags under a given repository. The tags -for an image repository can be retrieved with the following request: - - GET /v2//tags/list - -The response will be in the following format: - - 200 OK - Content-Type: application/json - - { - "name": , - "tags": [ - , - ... - ] - } - -For repositories with a large number of tags, this response may be quite -large, so care should be taken by the client when parsing the response to -reduce copying. - -### Deleting an Image - -An image may be deleted from the registry via its `name` and `reference`. A -delete may be issued with the following request format: - - DELETE /v2//manifests/ - -For deletes, `reference` *must* be a digest or the delete will fail. If the -image exists and has been successfully deleted, the following response will be -issued: - - 202 Accepted - Content-Length: None - -If the image had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -## Detail - -> **Note**: This section is still under construction. 
For the purposes of -> implementation, if any details below differ from the described request flows -> above, the section below should be corrected. When they match, this note -> should be removed. - -The behavior of the endpoints are covered in detail in this section, organized -by route and entity. All aspects of the request and responses are covered, -including headers, parameters and body formats. Examples of requests and their -corresponding responses, with success and failure, are enumerated. - -> **Note**: The sections on endpoint detail are arranged with an example -> request, a description of the request, followed by information about that -> request. - -A list of methods and URIs are covered in the table below: - -|Method|Path|Entity|Description| --------|----|------|------------ -| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. | -| GET | `/v2//tags/list` | Tags | Fetch the tags under the repository identified by `name`. | -| GET | `/v2//manifests/` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | -| PUT | `/v2//manifests/` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | -| DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | -| GET | `/v2//blobs/` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | -| POST | `/v2//blobs/uploads/` | Intiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. | -| GET | `/v2//blobs/uploads/` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. | -| PATCH | `/v2//blobs/uploads/` | Blob Upload | Upload a chunk of data for the specified upload. | -| PUT | `/v2//blobs/uploads/` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. | -| DELETE | `/v2//blobs/uploads/` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. | - - -The detail for each endpoint is covered in the following sections. - -### Errors - -The error codes encountered via the API are enumerated in the following table: - -|Code|Message|Description| --------|----|------|------------ - `UNKNOWN` | unknown error | Generic error returned when the error does not have an API classification. - `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. - `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. - `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. 
This error may also be returned when a manifest includes an invalid layer digest. - `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. - `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. - `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. - `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. - `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. - `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. - `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. - `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. - `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. - `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. - - - -### Base - -Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization. - - - -#### GET Base - -Check that the endpoint implements Docker Registry API V2. - - - -``` -GET /v2/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| - - - - -###### On Success: OK - -``` -200 OK -``` - -The API implements V2 protocol and is accessible. - - - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authorized to access the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -``` - -The registry does not implement the V2 API. - - - - - -### Tags - -Retrieve information about tags. - - - -#### GET Tags - -Fetch the tags under the repository identified by `name`. 
- - - -``` -GET /v2//tags/list -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tags": [ - , - ... - ] -} -``` - -A list of tags for the named repository. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - - - -### Manifest - -Create, update and retrieve manifests. - - - -#### GET Manifest - -Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. - - - -``` -GET /v2//manifests/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`tag`|path|Tag of the target manifiest.| - - - - -###### On Success: OK - -``` -200 OK -Docker-Content-Digest: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": -} -``` - -The manifest idenfied by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The name or reference was invalid. 
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The named manifest is not known to the registry. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | - - - - -#### PUT Manifest - -Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. - - - -``` -PUT /v2//manifests/ -Host: -Authorization: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": -} -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`tag`|path|Tag of the target manifiest.| - - - - -###### On Success: Accepted - -``` -202 Accepted -Location: -Content-Length: 0 -Docker-Content-Digest: -``` - -The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The canonical location url of the uploaded manifest.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Invalid Manifest - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. 
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | -| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | -| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have permission to push to the repository. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Missing Layer(s) - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": - } - }, - ... - ] -} -``` - -One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - - - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - - -#### DELETE Manifest - -Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
-
-```
-DELETE /v2/<name>/manifests/<reference>
-Host: <registry host>
-Authorization: <scheme> <token>
-```
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`name`|path|Name of the target repository.|
-|`tag`|path|Tag of the target manifest.|
-
-###### On Success: Accepted
-
-```
-202 Accepted
-```
-
-###### On Failure: Invalid Name or Tag
-
-```
-400 Bad Request
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The specified `name` or `tag` was invalid and the delete was unable to proceed.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the URI tag, this error will be returned. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Unknown Manifest
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The specified `name` or `tag` is unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
-| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
-
-### Blob
-
-Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.
-
-#### GET Blob
-
-Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.
-
-##### Fetch Blob
-
-```
-GET /v2/<name>/blobs/<digest>
-Host: <registry host>
-Authorization: <scheme> <token>
-```
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`name`|path|Name of the target repository.|
-|`digest`|path|Digest of desired blob.|
-
-###### On Success: OK
-
-```
-200 OK
-Content-Length: <length>
-Docker-Content-Digest: <digest>
-Content-Type: application/octet-stream
-
-<blob binary data>
-```
-
-The blob identified by `digest` is available. The blob content will be present in the body of the response.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Content-Length`|The length of the requested blob content.|
-|`Docker-Content-Digest`|Digest of the targeted content for the request.|
-
-###### On Success: Temporary Redirect
-
-```
-307 Temporary Redirect
-Location: <blob location>
-Docker-Content-Digest: <digest>
-```
-
-The blob identified by `digest` is available at the provided location.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Location`|The location where the layer should be accessible.|
-|`Docker-Content-Digest`|Digest of the targeted content for the request.|
-
-###### On Failure: Bad Request
-
-```
-400 Bad Request
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to the repository.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The blob, identified by `name` and `digest`, is unknown to the registry. A client-side sketch of the overall fetch flow follows.
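The following Go sketch (not from the specification) pulls the status handling for this endpoint together. The registry host and repository are hypothetical; the tarsum digest is the example value used elsewhere in this document.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Hypothetical registry host and repository.
	url := "https://registry.example.com/v2/library/ubuntu/blobs/" +
		"tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"

	// http.Get follows the optional 307 redirect to a separate blob
	// location transparently.
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		// A real client would write the layer to disk and verify its digest.
		n, err := io.Copy(io.Discard, resp.Body)
		if err != nil {
			panic(err)
		}
		fmt.Printf("fetched %d bytes (digest: %s)\n",
			n, resp.Header.Get("Docker-Content-Digest"))
	case http.StatusNotFound:
		fmt.Fprintln(os.Stderr, "blob unknown to the registry")
	default:
		fmt.Fprintln(os.Stderr, "unexpected status:", resp.Status)
	}
}
```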
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
-| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
-
-##### Fetch Blob Part
-
-```
-GET /v2/<name>/blobs/<digest>
-Host: <registry host>
-Authorization: <scheme> <token>
-Range: bytes=<start>-<end>
-```
-
-This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Ranges: bytes` is returned, range requests can be used to fetch partial content (a client sketch of this probe appears below).
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`Range`|header|HTTP Range header specifying blob chunk.|
-|`name`|path|Name of the target repository.|
-|`digest`|path|Digest of desired blob.|
-
-###### On Success: Partial Content
-
-```
-206 Partial Content
-Content-Length: <length>
-Content-Range: bytes <start>-<end>/<size>
-Content-Type: application/octet-stream
-
-<blob binary data>
-```
-
-The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the response.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Content-Length`|The length of the requested blob chunk.|
-|`Content-Range`|Content range of blob chunk.|
-
-###### On Failure: Bad Request
-
-```
-400 Bad Request
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to the repository.
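A minimal Go sketch of the range-support probe and partial fetch described above (not part of the specification; host, repository, and chunk size are hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical names; the digest is the example value used in this spec.
	url := "https://registry.example.com/v2/library/ubuntu/blobs/" +
		"tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"

	// Probe for range support first: a HEAD response carrying
	// "Accept-Ranges: bytes" indicates partial fetches are allowed.
	head, err := http.Head(url)
	if err != nil {
		panic(err)
	}
	head.Body.Close()
	if head.Header.Get("Accept-Ranges") != "bytes" {
		fmt.Println("registry does not advertise range support for this blob")
		return
	}

	// Fetch the first 1024 bytes of the blob.
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=0-1023")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusPartialContent {
		chunk, _ := io.ReadAll(resp.Body)
		fmt.Printf("got %d bytes, Content-Range: %s\n",
			len(chunk), resp.Header.Get("Content-Range"))
	}
}
```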
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
-| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
-
-###### On Failure: Requested Range Not Satisfiable
-
-```
-416 Requested Range Not Satisfiable
-```
-
-The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.
-
-### Initiate Blob Upload
-
-Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.
-
-#### POST Initiate Blob Upload
-
-Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.
-
-##### Initiate Monolithic Blob Upload
-
-```
-POST /v2/<name>/blobs/uploads/?digest=<digest>
-Host: <registry host>
-Authorization: <scheme> <token>
-Content-Length: <length of blob>
-Content-Type: application/octet-stream
-
-<binary data>
-```
-
-Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`Content-Length`|header||
-|`name`|path|Name of the target repository.|
-|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.|
-
-###### On Success: Created
-
-```
-201 Created
-Location: <blob location>
-Content-Length: 0
-Docker-Upload-UUID: <uuid>
-```
-
-The blob has been created in the registry and is available at the provided location.
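A hedged Go sketch of this single-request monolithic upload (illustrative only; the layer bytes, registry host, and repository are hypothetical, and a real client would use a tarsum digest rather than plain sha256):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical layer content; real clients stream layer tarballs.
	layer := []byte("example layer content")
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(layer))

	// Single-request upload: POST the blob with the digest as a query
	// parameter, as described above.
	q := url.Values{}
	q.Set("digest", digest)
	endpoint := "https://registry.example.com/v2/library/ubuntu/blobs/uploads/?" + q.Encode()

	resp, err := http.Post(endpoint, "application/octet-stream", bytes.NewReader(layer))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusCreated {
		fmt.Println("blob stored at:", resp.Header.Get("Location"))
	} else {
		fmt.Println("upload not completed:", resp.Status)
	}
}
```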
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Location`||
-|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
-|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
-
-###### On Failure: Invalid Name or Digest
-
-```
-400 Bad Request
-```
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to push to the repository.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-##### Initiate Resumable Blob Upload
-
-```
-POST /v2/<name>/blobs/uploads/
-Host: <registry host>
-Authorization: <scheme> <token>
-Content-Length: 0
-```
-
-Initiate a resumable blob upload with an empty request body.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
-|`name`|path|Name of the target repository.|
-
-###### On Success: Accepted
-
-```
-202 Accepted
-Content-Length: 0
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: 0-0
-Docker-Upload-UUID: <uuid>
-```
-
-The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
-|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
-|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.|
-|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
-
-###### On Failure: Invalid Name or Digest
-
-```
-400 Bad Request
-```
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to push to the repository.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-### Blob Upload
-
-Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only obtain them through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.
-
-#### GET Blob Upload
-
-Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.
-
-```
-GET /v2/<name>/blobs/uploads/<uuid>
-Host: <registry host>
-Authorization: <scheme> <token>
-```
-
-Retrieve the progress of the current upload, as reported by the `Range` header.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`name`|path|Name of the target repository.|
-|`uuid`|path|A uuid identifying the upload. This field can accept almost anything.|
-
-###### On Success: Upload Progress
-
-```
-204 No Content
-Range: 0-<offset>
-Content-Length: 0
-Docker-Upload-UUID: <uuid>
-```
-
-The upload is known and in progress. The last received offset is available in the `Range` header.
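A small illustrative Go helper (not from the specification) for this status check; the upload URL and its UUID are hypothetical placeholders, obtained in practice from a previous `Location` header:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

// lastOffset asks the registry for upload progress and parses the
// "Range: 0-<offset>" header from the 204 response described above.
func lastOffset(uploadURL string) (int64, error) {
	resp, err := http.Get(uploadURL)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return 0, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	r := resp.Header.Get("Range") // e.g. "0-12345"; the end offset is inclusive
	parts := strings.SplitN(r, "-", 2)
	if len(parts) != 2 {
		return 0, fmt.Errorf("malformed Range header: %q", r)
	}
	return strconv.ParseInt(parts[1], 10, 64)
}

func main() {
	// Hypothetical upload URL.
	end, err := lastOffset("https://registry.example.com/v2/library/ubuntu/blobs/uploads/5dcb36e9-1a5e-4b3b-8a9d-000000000000")
	if err != nil {
		panic(err)
	}
	fmt.Println("last received offset:", end)
}
```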
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Range`|Range indicating the current progress of the upload.|
-|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
-|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
-
-###### On Failure: Bad Request
-
-```
-400 Bad Request
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-There was an error processing the upload and it must be restarted.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to the repository.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The upload is unknown to the registry. The upload must be restarted.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
-
-#### PATCH Blob Upload
-
-Upload a chunk of data for the specified upload.
-
-```
-PATCH /v2/<name>/blobs/uploads/<uuid>
-Host: <registry host>
-Authorization: <scheme> <token>
-Content-Range: <start of range>-<end of range, inclusive>
-Content-Length: <length of chunk>
-Content-Type: application/octet-stream
-
-<binary chunk>
-```
-
-Upload a chunk of data to the specified upload without completing the upload.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must be the end offset retrieved via a status check, plus one. Note that this is a non-standard use of the `Content-Range` header.|
-|`Content-Length`|header|Length of the chunk being uploaded, corresponding to the length of the request body.|
-|`name`|path|Name of the target repository.|
-|`uuid`|path|A uuid identifying the upload. This field can accept almost anything.|
-
-###### On Success: Chunk Accepted
-
-```
-204 No Content
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: 0-<offset>
-Content-Length: 0
-Docker-Upload-UUID: <uuid>
-```
-
-The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
-|`Range`|Range indicating the current progress of the upload.|
-|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
-|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|
-
-###### On Failure: Bad Request
-
-```
-400 Bad Request
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-There was an error processing the upload and it must be restarted.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to push to the repository.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The upload is unknown to the registry. The upload must be restarted.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
-
-###### On Failure: Requested Range Not Satisfiable
-
-```
-416 Requested Range Not Satisfiable
-```
-
-The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.
-
-#### PUT Blob Upload
-
-Complete the upload specified by `uuid`, optionally appending the body as the final chunk.
-
-```
-PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
-Host: <registry host>
-Authorization: <scheme> <token>
-Content-Range: <start of range>-<end of range, inclusive>
-Content-Length: <length of chunk>
-Content-Type: application/octet-stream
-
-<binary chunk>
-```
-
-Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`Content-Range`|header|Range of bytes identifying the block of content represented by the body. Start must be the end offset retrieved via a status check, plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.|
-|`Content-Length`|header|Length of the chunk being uploaded, corresponding to the length of the request body. May be zero if no data is provided.|
-|`name`|path|Name of the target repository.|
-|`uuid`|path|A uuid identifying the upload. This field can accept almost anything.|
-|`digest`|query|Digest of uploaded blob.|
-
-###### On Success: Upload Complete
-
-```
-204 No Content
-Location: <blob location>
-Content-Range: <start of range>-<end of range, inclusive>
-Content-Length: <length of chunk>
-Docker-Content-Digest: <digest>
-```
-
-The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Location`||
-|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via a status check. Note that this is a non-standard use of the `Content-Range` header.|
-|`Content-Length`|Length of the chunk being uploaded, corresponding to the length of the request body.|
-|`Docker-Content-Digest`|Digest of the targeted content for the request.|
-
-###### On Failure: Bad Request
-
-```
-400 Bad Request
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-There was an error processing the upload and it must be restarted.
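For illustration (not part of the specification), a Go sketch of completing an upload with a final chunk via `PUT`; the upload URL, chunk, and offsets are hypothetical:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical final chunk; here it is the whole blob, so its sha256 is
	// also the blob digest. The upload URL comes from a prior Location header.
	finalChunk := []byte("last bytes of the layer")
	blobDigest := fmt.Sprintf("sha256:%x", sha256.Sum256(finalChunk))
	startOffset := 0 // offset where this chunk begins

	q := url.Values{"digest": {blobDigest}}
	putURL := "https://registry.example.com/v2/library/ubuntu/blobs/uploads/5dcb36e9?" + q.Encode()

	req, err := http.NewRequest("PUT", putURL, bytes.NewReader(finalChunk))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	// Non-standard, inclusive Content-Range, as the parameter table notes.
	req.Header.Set("Content-Range",
		fmt.Sprintf("%d-%d", startOffset, startOffset+len(finalChunk)-1))
	// Content-Length is set automatically from the reader by net/http.

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status,
		"canonical location:", resp.Header.Get("Location"))
}
```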
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to push to the repository.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The upload is unknown to the registry. The upload must be restarted.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
-
-###### On Failure: Requested Range Not Satisfiable
-
-```
-416 Requested Range Not Satisfiable
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: 0-<last valid range>
-```
-
-The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
-|`Range`|Range indicating the current progress of the upload.|
-
-#### DELETE Blob Upload
-
-Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.
-
-```
-DELETE /v2/<name>/blobs/uploads/<uuid>
-Host: <registry host>
-Authorization: <scheme> <token>
-Content-Length: 0
-```
-
-Cancel the upload specified by `uuid`.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
-|`name`|path|Name of the target repository.|
-|`uuid`|path|A uuid identifying the upload. This field can accept almost anything.|
-
-###### On Success: Upload Deleted
-
-```
-204 No Content
-Content-Length: 0
-```
-
-The upload has been successfully deleted.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
-
-###### On Failure: Bad Request
-
-```
-400 Bad Request
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-An error was encountered processing the delete. The client may ignore this error.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ..."
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to the repository.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-|`Content-Length`|Length of the JSON error response body.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|--------|------------
-| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
-
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/api.md.tmpl b/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/api.md.tmpl
deleted file mode 100644
index 6d3227539737..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/api.md.tmpl
+++ /dev/null
@@ -1,796 +0,0 @@
-# Docker Registry HTTP API V2
-
-## Introduction
-
-The _Docker Registry HTTP API_ is the protocol to facilitate distribution of
-images to the docker engine.
-It interacts with instances of the docker
-registry, which is a service to manage information about docker images and
-enable their distribution. The specification covers the operation of version 2
-of this API, known as _Docker Registry HTTP API V2_.
-
-While the V1 registry protocol is usable, there are several problems with the
-architecture that have led to this new version. The main driver of this
-specification is a set of changes to the docker image format, covered in
-docker/docker#8093. The new, self-contained image manifest simplifies image
-definition and improves security. This specification will build on that work,
-leveraging new properties of the manifest format to improve performance,
-reduce bandwidth usage and decrease the likelihood of backend corruption.
-
-For relevant details and history leading up to this specification, please see
-the following issues:
-
-- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
-- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
-- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)
-
-### Scope
-
-This specification covers the URL layout and protocols of the interaction
-between docker registry and docker core. This will affect the docker core
-registry API and the rewrite of docker-registry. Docker registry
-implementations may implement other API endpoints, but they are not covered by
-this specification.
-
-This includes the following features:
-
-- Namespace-oriented URI Layout
-- PUSH/PULL registry server for V2 image manifest format
-- Resumable layer PUSH support
-- V2 Client library implementation
-
-While authentication and authorization support will influence this
-specification, details of the protocol will be left to a future specification.
-Relevant header definitions and error codes are present to provide an
-indication of what a client may encounter.
-
-#### Future
-
-There are features that have been discussed during the process of cutting this
-specification. The following is an incomplete list:
-
-- Immutable image references
-- Multiple architecture support
-- Migration from v2compatibility representation
-
-These may represent features that are either out of the scope of this
-specification, the purview of another specification or have been deferred to a
-future version.
-
-### Use Cases
-
-For the most part, the use cases of the former registry API apply to the new
-version. Differentiating use cases are covered below.
-
-#### Image Verification
-
-A docker engine instance would like to run verified image named
-"library/ubuntu", with the tag "latest". The engine contacts the registry,
-requesting the manifest for "library/ubuntu:latest". An untrusted registry
-returns a manifest. Before proceeding to download the individual layers, the
-engine verifies the manifest's signature, ensuring that the content was
-produced from a trusted source and no tampering has occurred. After each layer
-is downloaded, the engine verifies the digest of the layer, ensuring that the
-content matches that specified by the manifest.
-
-#### Resumable Push
-
-Company X's build servers lose connectivity to docker registry before
-completing an image layer transfer. After connectivity returns, the build
-server attempts to re-upload the image. The registry notifies the build server
-that the upload has already been partially attempted. The build server
-responds by only sending the remaining data to complete the image file.
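To make the resumable-push use case concrete, here is a hedged Go sketch (not part of the specification) of re-sending only the remaining bytes after a dropped connection; the upload URL and the last acknowledged offset are hypothetical and would come from the status endpoint described later:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// resume pushes the remainder of a blob after a dropped connection, given
// the last offset the registry reported via an upload status check.
func resume(uploadURL string, blob []byte, lastOffset int64) error {
	remaining := blob[lastOffset+1:] // Range end offsets are inclusive
	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(remaining))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	// Non-standard, inclusive Content-Range used by the upload protocol.
	req.Header.Set("Content-Range",
		fmt.Sprintf("%d-%d", lastOffset+1, int64(len(blob))-1))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("chunk rejected: %s", resp.Status)
	}
	return nil
}

func main() {
	// Hypothetical: a 1000-byte layer of which the registry acknowledged
	// bytes 0-499 before the connection dropped.
	blob := make([]byte, 1000)
	err := resume("https://registry.example.com/v2/library/ubuntu/blobs/uploads/5dcb36e9", blob, 499)
	fmt.Println("resume error:", err)
}
```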
-
-#### Resumable Pull
-
-Company X is having more connectivity problems but this time in their
-deployment datacenter. When downloading an image, the connection is
-interrupted before completion. The client keeps the partial data and uses http
-`Range` requests to avoid downloading repeated data.
-
-#### Layer Upload De-duplication
-
-Company Y's build system creates two identical docker layers from build
-processes A and B. Build process A completes uploading the layer before B.
-When process B attempts to upload the layer, the registry indicates that it's
-not necessary because the layer is already known.
-
-If process A and B upload the same layer at the same time, both operations
-will proceed and the first to complete will be stored in the registry (Note:
-we may modify this to prevent dogpile with some locking mechanism).
-
-### Changes
-
-The V2 specification has been written to work as a living document, specifying
-only what is certain and leaving what is not specified open or to future
-changes. Only non-conflicting additions should be made to the API and accepted
-changes should avoid preventing future changes from happening.
-
-This section should be updated when changes are made to the specification,
-indicating what is different. Optionally, we may start marking parts of the
-specification to correspond with the versions enumerated here.
-
-2.0.1
-
-- Added support for immutable manifest references in manifest endpoints.
-- Deleting a manifest by tag has been deprecated.
-- Specified `Docker-Content-Digest` header for appropriate entities.
-- Added error code for unsupported operations.
-
-2.0
-
-- This is the baseline specification.
-
-
-## Overview
-
-This section covers client flows and details of the API endpoints. The URI
-layout of the new API is structured to support a rich authentication and
-authorization model by leveraging namespaces. All endpoints will be prefixed
-by the API version and the repository name:
-
-    /v2/<name>/
-
-For example, for an API endpoint that works with the `library/ubuntu`
-repository, the URI prefix will be:
-
-    /v2/library/ubuntu/
-
-This scheme provides rich access control over various operations and methods
-using the URI prefix and http methods that can be controlled in a variety of
-ways.
-
-Classically, repository names have always been two path components where each
-path component is less than 30 characters. The V2 registry API does not
-enforce this. The rules for a repository name are as follows:
-
-1. A repository name is broken up into _path components_. A component of a
-   repository name must be at least two lowercase, alpha-numeric characters,
-   optionally separated by periods, dashes or underscores. More strictly, it
-   must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*` and the
-   matched result must be 2 or more characters in length.
-2. The name of a repository must have at least two path components, separated
-   by a forward slash.
-3. The total length of a repository name, including slashes, must be less than
-   256 characters.
-
-These name requirements _only_ apply to the registry API and should accept a
-superset of what is supported by other docker ecosystem components.
-
-All endpoints should support aggressive http caching, compression and range
-headers, where appropriate. The new API attempts to leverage HTTP semantics
-where possible but may break from standards to implement targeted features.
-
-For detail on individual endpoints, please see the [_Detail_](#detail)
-section.
-
-### Errors
-
-Actionable failure conditions, covered in detail in their relevant sections,
-are reported as part of 4xx responses, in a json response body. One or more
-errors will be returned in the following format:
-
-    {
-        "errors": [{
-                "code": <error code>,
-                "message": <error message>,
-                "detail": <arbitrary json>
-            },
-            ...
-        ]
-    }
-
-The `code` field will be a unique identifier, all caps with underscores by
-convention. The `message` field will be a human readable string. The optional
-`detail` field may contain arbitrary json data providing information the
-client can use to resolve the issue.
-
-While the client can take action on certain error codes, the registry may add
-new error codes over time. All client implementations should treat unknown
-error codes as `UNKNOWN`, allowing future error codes to be added without
-breaking API compatibility. For the purposes of the specification error codes
-will only be added and never removed.
-
-For a complete account of all error codes, please see the _Detail_ section.
-
-### API Version Check
-
-A minimal endpoint, mounted at `/v2/` will provide version support information
-based on its response statuses. The request format is as follows:
-
-    GET /v2/
-
-If a `200 OK` response is returned, the registry implements the V2(.1)
-registry API and the client may proceed safely with other V2 operations.
-Optionally, the response may contain information about the supported paths in
-the response body. The client should be prepared to ignore this data.
-
-If a `401 Unauthorized` response is returned, the client should take action
-based on the contents of the "WWW-Authenticate" header and try the endpoint
-again.
-Depending on access control setup, the client may still have to
-authenticate against different resources, even if this check succeeds.
-
-If `404 Not Found` response status, or other unexpected status, is returned,
-the client should proceed with the assumption that the registry does not
-implement V2 of the API.
-
-### Pulling An Image
-
-An "image" is a combination of a JSON manifest and individual layer files. The
-process of pulling an image centers around retrieving these two components.
-
-The first step in pulling an image is to retrieve the manifest. For reference,
-the relevant manifest fields for the registry are the following:
-
-| field     | description                                     |
-|-----------|-------------------------------------------------|
-| name      | The name of the image.                          |
-| tag       | The tag for this version of the image.          |
-| fsLayers  | A list of layer descriptors (including tarsum)  |
-| signature | A JWS used to verify the manifest content       |
-
-For more information about the manifest format, please see
-[docker/docker#8093](https://github.com/docker/docker/issues/8093).
-
-When the manifest is in hand, the client must verify the signature to ensure
-the names and layers are valid. Once confirmed, the client will then use the
-tarsums to download the individual layers. Layers are stored as blobs in
-the V2 registry API, keyed by their tarsum digest.
-
-#### Pulling an Image Manifest
-
-The image manifest can be fetched with the following url:
-
-```
-GET /v2/<name>/manifests/<reference>
-```
-
-The `name` and `reference` parameter identify the image and are required. The
-reference may include a tag or digest.
-
-A `404 Not Found` response will be returned if the image is unknown to the
-registry. If the image exists and the response is successful, the image
-manifest will be returned, with the following format (see docker/docker#8093
-for details):
-
-    {
-        "name": <name>,
-        "tag": <tag>,
-        "fsLayers": [
-            {
-                "blobSum": <tarsum>
-            },
-            ...
-        ],
-        "history": <v1 images>,
-        "signature": <JWS>
-    }
-
-The client should verify the returned manifest signature for authenticity
-before fetching layers.
-
-#### Pulling a Layer
-
-Layers are stored in the blob portion of the registry, keyed by tarsum digest.
-Pulling a layer is carried out by a standard http request. The URL is as
-follows:
-
-    GET /v2/<name>/blobs/<tarsum>
-
-Access to a layer will be gated by the `name` of the repository but is
-identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an
-opaque field, to be interpreted by the tarsum library.
-
-This endpoint may issue a 307 (302 for HTTP 1.0 clients) redirect to another
-service for downloading the layer and clients should be prepared to handle
-redirects.
-
-### Pushing An Image
-
-Pushing an image works in the opposite order as a pull: the individual layers
-are pushed first and, once they are all available in the registry, the signed
-manifest is uploaded.
-
-#### Pushing a Layer
-
-All layer uploads use two steps to manage the upload process. The first step
-starts the upload in the registry service; the second step uses the returned
-upload url to transfer the actual data. Uploads are started with a POST
-request:
-
-```
-POST /v2/<name>/blobs/uploads/
-```
-
-The parameters of this request are the image namespace under which the layer
-will be linked. Responses to this request are covered below.
-
-##### Existing Layers
-
-The existence of a layer can be checked via a `HEAD` request to the blob store
-API. The request should be formatted as follows:
-
-```
-HEAD /v2/<name>/blobs/<digest>
-```
-
-If the layer with the tarsum specified in `digest` is available, a 200 OK
-response will be received, with no actual body content (this is according to
-http specification). The response will look as follows:
-
-```
-200 OK
-Content-Length: <length of blob>
-Docker-Content-Digest: <digest>
-```
-
-When this response is received, the client can assume that the layer is
-already available in the registry under the given name and should take no
-further action to upload the layer. Note that the binary digests may differ
-for the existing registry layer, but the tarsums will be guaranteed to match.
-A sketch of this existence check follows.
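A minimal Go sketch of the `HEAD` existence check just described (not part of the specification; the registry host and repository are placeholders, and the digest is the tarsum example used in this document):

```go
package main

import (
	"fmt"
	"net/http"
)

// blobExists performs the HEAD existence check described above; a 200
// response means the layer is already present and need not be uploaded.
func blobExists(registry, name, digest string) (bool, error) {
	resp, err := http.Head(registry + "/v2/" + name + "/blobs/" + digest)
	if err != nil {
		return false, err
	}
	resp.Body.Close()
	return resp.StatusCode == http.StatusOK, nil
}

func main() {
	exists, err := blobExists("https://registry.example.com", "library/ubuntu",
		"tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b")
	if err != nil {
		panic(err)
	}
	fmt.Println("layer already present:", exists)
}
```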
-
-##### Uploading the Layer
-
-If the POST request is successful, a `202 Accepted` response will be returned
-with the upload URL in the `Location` header:
-
-```
-202 Accepted
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: bytes=0-<offset>
-Content-Length: 0
-Docker-Upload-UUID: <uuid>
-```
-
-The rest of the upload process can be carried out with the returned url,
-called the "Upload URL" from the `Location` header. All responses to the
-upload url, whether sending data or getting status, will be in this format.
-Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
-header is specified, clients should treat it as an opaque url and should never
-try to assemble it. While the `uuid` parameter may be an actual UUID, this
-proposal imposes no constraints on the format and clients should never impose
-any.
-
-If clients need to correlate local upload state with remote upload state, the
-contents of the `Docker-Upload-UUID` header should be used. Such an id can be
-used to key the last used location header when implementing resumable uploads.
-
-##### Upload Progress
-
-The progress and chunk coordination of the upload process will be coordinated
-through the `Range` header. While this is a non-standard use of the `Range`
-header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
-For an upload that just started, for an example with a 1000 byte layer file,
-the `Range` header would be as follows:
-
-```
-Range: bytes=0-0
-```
-
-To get the status of an upload, issue a GET request to the upload URL:
-
-```
-GET /v2/<name>/blobs/uploads/<uuid>
-Host: <registry host>
-```
-
-The response will be similar to the above, except will return 204 status:
-
-```
-204 No Content
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: bytes=0-<offset>
-Docker-Upload-UUID: <uuid>
-```
-
-Note that the HTTP `Range` header byte ranges are inclusive and that will be
-honored, even in non-standard use cases.
-
-##### Monolithic Upload
-
-A monolithic upload is simply a chunked upload with a single chunk and may be
-favored by clients that would like to avoid the complexity of chunking. To
-carry out a "monolithic" upload, one can simply put the entire content blob to
-the provided URL:
-
-```
-PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
-Content-Length: <size of layer>
-Content-Type: application/octet-stream
-
-<Layer Binary Data>
-```
-
-The "digest" parameter must be included with the PUT request. Please see the
-_Completed Upload_ section for details on the parameters and expected
-responses.
-
-Additionally, the upload can be completed with a single `POST` request to
-the uploads endpoint, including the "size" and "digest" parameters:
-
-```
-POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
-Content-Length: <size of layer>
-Content-Type: application/octet-stream
-
-<Layer Binary Data>
-```
-
-On the registry service, this should allocate an upload, accept and verify
-the data and return the same response as the final chunk of an upload. If the
-POST request fails collecting the data in any way, the registry should attempt
-to return an error response to the client with the `Location` header providing
-a place to continue the upload.
-
-The single `POST` method is provided for convenience and most clients should
-implement `POST` + `PUT` to support reliable resume of uploads.
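For illustration (not from the specification), a Go sketch of the recommended `POST` + `PUT` flow: start an upload, take the `Location` verbatim, then `PUT` the whole blob with the digest parameter appended. The registry host, repository, and layer bytes are hypothetical, and a plain sha256 stands in for a tarsum.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical content; a real client uploads the layer tarball.
	layer := []byte("example layer content")
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(layer))

	// Step 1: start the upload and capture the opaque upload URL.
	start, err := http.Post("https://registry.example.com/v2/library/ubuntu/blobs/uploads/", "", nil)
	if err != nil {
		panic(err)
	}
	start.Body.Close()
	uploadURL := start.Header.Get("Location")

	// Step 2: PUT the whole blob, appending the digest parameter to
	// whatever parameters the Location already carries.
	sep := "?"
	if u, _ := url.Parse(uploadURL); u != nil && u.RawQuery != "" {
		sep = "&"
	}
	req, err := http.NewRequest("PUT",
		uploadURL+sep+"digest="+url.QueryEscape(digest), bytes.NewReader(layer))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```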
-
-##### Chunked Upload
-
-To carry out an upload of a chunk, the client can specify a range header and
-only include that part of the layer file:
-
-```
-PATCH /v2/<name>/blobs/uploads/<uuid>
-Content-Length: <size of chunk>
-Content-Range: <start of range>-<end of range>
-Content-Type: application/octet-stream
-
-<Layer Chunk Binary Data>
-```
-
-There is no enforcement on layer chunk splits other than that the server must
-receive them in order. The server may enforce a minimum chunk size. If the
-server cannot accept the chunk, a `416 Requested Range Not Satisfiable`
-response will be returned and will include a `Range` header indicating the
-current status:
-
-```
-416 Requested Range Not Satisfiable
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: 0-<last valid range>
-Content-Length: 0
-Docker-Upload-UUID: <uuid>
-```
-
-If this response is received, the client should resume from the "last valid
-range" and upload the subsequent chunk. A 416 will be returned under the
-following conditions:
-
-- Invalid Content-Range header format
-- Out of order chunk: the range of the next chunk must start immediately after
-  the "last valid range" from the previous response.
-
-When a chunk is accepted as part of the upload, a `202 Accepted` response will
-be returned, including a `Range` header with the current upload status:
-
-```
-202 Accepted
-Location: /v2/<name>/blobs/uploads/<uuid>
-Range: bytes=0-<offset>
-Content-Length: 0
-Docker-Upload-UUID: <uuid>
-```
-
-##### Completed Upload
-
-For an upload to be considered complete, the client must submit a `PUT`
-request on the upload endpoint with a digest parameter. If it is not provided,
-the upload will not be considered complete. The format for the final chunk
-will be as follows:
-
-```
-PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
-Content-Length: <size of chunk>
-Content-Range: <start of range>-<end of range>
-Content-Type: application/octet-stream
-
-<Last Layer Chunk Binary Data>
-```
-
-Optionally, if all chunks have already been uploaded, a `PUT` request with a
-`digest` parameter and zero-length body may be sent to complete and validate
-the upload. Multiple "digest" parameters may be provided with different
-digests. The server may verify none or all of them but _must_ notify the
-client if the content is rejected.
-
-When the last chunk is received and the layer has been validated, the client
-will receive a `201 Created` response:
-
-```
-201 Created
-Location: /v2/<name>/blobs/<tarsum>
-Content-Length: 0
-Docker-Content-Digest: <digest>
-```
-
-The `Location` header will contain the registry URL to access the accepted
-layer file. The `Docker-Content-Digest` header returns the canonical digest of
-the uploaded blob which may differ from the provided digest. Most clients may
-ignore the value but if it is used, the client should verify the value against
-the uploaded blob data.
-
-###### Digest Parameter
-
-The "digest" parameter is designed as an opaque parameter to support
-verification of a successful transfer. The initial version of the registry API
-will support a tarsum digest, in the standard tarsum format. For example, a
-HTTP URI parameter might be as follows:
-
-```
-tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
-```
-
-Given this parameter, the registry will verify that the provided content does
-result in this tarsum. Optionally, the registry can support other digest
-parameters for non-tarfile content stored as a layer. A regular hash digest
-might be specified as follows:
-
-```
-sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
-```
-
-Such a parameter would be used to verify the binary content (as opposed
-to the tar content) at the end of the upload process. A sketch of computing
-such a value follows.
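A short, hedged Go sketch (not part of the specification) of producing the plain `sha256:<hex>` form of the digest parameter for non-tarfile content; the file name is a hypothetical placeholder:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// digestOf produces the "sha256:<hex>" form of the digest parameter for
// plain (non-tarsum) content, matching the second example above.
func digestOf(r io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return fmt.Sprintf("sha256:%x", h.Sum(nil)), nil
}

func main() {
	f, err := os.Open("layer.tar") // hypothetical layer file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	d, err := digestOf(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(d)
}
```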
-
-For the initial version, registry servers are only required to support the
-tarsum format.
-
-##### Canceling an Upload
-
-An upload can be cancelled by issuing a DELETE request to the upload endpoint.
-The format will be as follows:
-
-```
-DELETE /v2/<name>/blobs/uploads/<uuid>
-```
-
-After this request is issued, the upload uuid will no longer be valid and the
-registry server will dump all intermediate data. While uploads will time out
-if not completed, clients should issue this request if they encounter a fatal
-error but still have the ability to issue an http request.
-
-##### Errors
-
-If a 502, 503 or 504 error is received, the client should assume that the
-upload can proceed once the temporary condition clears, honoring the
-appropriate retry mechanism. Other 5xx errors should be treated as terminal.
-
-If there is a problem with the upload, a 4xx error will be returned indicating
-the problem. After receiving a 4xx response (except 416, as called out above),
-the upload will be considered failed and the client should take appropriate
-action.
-
-Note that the upload url will not be available forever. If the upload uuid is
-unknown to the registry, a `404 Not Found` response will be returned and the
-client must restart the upload process.
-
-#### Pushing an Image Manifest
-
-Once all of the layers for an image are uploaded, the client can upload the
-image manifest. An image can be pushed using the following request format:
-
-    PUT /v2/<name>/manifests/<reference>
-
-    {
-        "name": <name>,
-        "tag": <tag>,
-        "fsLayers": [
-            {
-                "blobSum": <tarsum>
-            },
-            ...
-        ],
-        "history": <v1 images>,
-        "signature": <JWS>,
-        ...
-    }
-
-The `name` and `reference` fields of the request body must match those specified in
-the URL. The `reference` field may be a "tag" or a "digest".
-
-If there is a problem with pushing the manifest, a relevant 4xx response will
-be returned with a JSON error message. Please see the _PUT Manifest_ section
-for details on possible error codes that may be returned.
-
-If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
-returned. The `detail` field of the error response will have a `digest` field
-identifying the missing blob, which will be a tarsum. An error is returned for
-each unknown blob. The response format is as follows:
-
-    {
-        "errors": [{
-                "code": "BLOB_UNKNOWN",
-                "message": "blob unknown to registry",
-                "detail": {
-                    "digest": <tarsum>
-                }
-            },
-            ...
-        ]
-    }
-
-#### Listing Image Tags
-
-It may be necessary to list all of the tags under a given repository. The tags
-for an image repository can be retrieved with the following request:
-
-    GET /v2/<name>/tags/list
-
-The response will be in the following format:
-
-    200 OK
-    Content-Type: application/json
-
-    {
-        "name": <name>,
-        "tags": [
-            <tag>,
-            ...
-        ]
-    }
-
-For repositories with a large number of tags, this response may be quite
-large, so care should be taken by the client when parsing the response to
-reduce copying.
-
-### Deleting an Image
-
-An image may be deleted from the registry via its `name` and `reference`. A
-delete may be issued with the following request format:
-
-    DELETE /v2/<name>/manifests/<reference>
-
-For deletes, `reference` *must* be a digest or the delete will fail. If the
-image exists and has been successfully deleted, the following response will be
-issued:
-
-    202 Accepted
-    Content-Length: None
-
-If the image had already been deleted or did not exist, a `404 Not Found`
-response will be issued instead.
-
-## Detail
-
-> **Note**: This section is still under construction. For the purposes of
-
-## Detail
-
-> **Note**: This section is still under construction. For the purposes of
-> implementation, if any details below differ from the described request flows
-> above, the section below should be corrected. When they match, this note
-> should be removed.
-
-The behavior of the endpoints is covered in detail in this section, organized
-by route and entity. All aspects of the request and responses are covered,
-including headers, parameters and body formats. Examples of requests and their
-corresponding responses, with success and failure, are enumerated.
-
-> **Note**: The sections on endpoint detail are arranged with an example
-> request, a description of the request, followed by information about that
-> request.
-
-A list of methods and URIs is covered in the table below:
-
-|Method|Path|Entity|Description|
--------|----|------|------------
-{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} |
-{{end}}{{end}}
-
-The detail for each endpoint is covered in the following sections.
-
-### Errors
-
-The error codes encountered via the API are enumerated in the following table:
-
-|Code|Message|Description|
--------|-------|------------
-{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}}
-{{end}}
-
-{{range $route := .RouteDescriptors}}
-### {{.Entity}}
-
-{{.Description}}
-
-{{range $method := $route.Methods}}
-
-#### {{.Method}} {{$route.Entity}}
-
-{{.Description}}
-
-{{if .Requests}}{{range .Requests}}{{if .Name}}
-##### {{.Name}}{{end}}
-
-```
-{{$method.Method}} {{$route.Path|prettygorilla}}{{if .QueryParameters}}?{{range .QueryParameters}}{{.Name}}={{.Format}}{{end}}{{end}}{{range .Headers}}
-{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
-Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}
-
-{{.Body.Format}}{{end}}
-```
-
-{{.Description}}
-
-{{if or .Headers .PathParameters .QueryParameters}}
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-{{range .Headers}}|`{{.Name}}`|header|{{.Description}}|
-{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}|
-{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}|
-{{end}}{{end}}
-
-{{if .Successes}}
-{{range .Successes}}
-###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}}
-
-```
-{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}}
-{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
-Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}
-
-{{.Body.Format}}{{end}}
-```
-
-{{.Description}}
-
-{{if .Headers}}The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-{{range .Headers}}|`{{.Name}}`|{{.Description}}|
-{{end}}{{end}}{{end}}{{end}}
-
-{{if .Failures}}
-{{range .Failures}}
-###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}}
-
-```
-{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}}
-{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}}
-Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}}
-
-{{.Body.Format}}{{end}}
-```
-
-{{.Description}}
-{{if .Headers}}
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-{{range .Headers}}|`{{.Name}}`|{{.Description}}|
-{{end}}{{end}}
-
-{{if .ErrorCodes}}
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
--------|-------|------------
-{{range $err := .ErrorCodes}}| `{{$err}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} |
-{{end}}
-
-{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}
-
-{{end}}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/auth/token.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/auth/token.md
deleted file mode 100644
index 921622578640..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/auth/token.md
+++ /dev/null
@@ -1,413 +0,0 @@
-# Docker Registry v2 authentication via central service
-
-Today a Docker Registry can run in standalone mode in which there are no
-authorization checks. While adding your own HTTP authorization requirements in
-a proxy placed between the client and the registry can give you greater access
-control, we'd like a native authorization mechanism that's public key based
-with access control lists managed separately with the ability to have fine
-granularity in access control on a by-key, by-user, by-namespace, and
-by-repository basis. In v1 this can be configured by specifying an
-`index_endpoint` in the registry's config. Clients present tokens generated by
-the index and tokens are validated on-line by the registry with every request.
-This results in a complex authentication and authorization loop that occurs
-with every registry operation. Some people are very familiar with this image:
-
-![index auth](https://docs.docker.com/static_files/docker_pull_chart.png)
-
-The above image outlines the 6-step process in accessing the Official Docker
-Registry.
-
-1. Contact the Docker Hub to find out where I should download “samalba/busybox”
-2. Docker Hub replies:
-    a. samalba/busybox is on Registry A
-    b. here are the checksums for samalba/busybox (for all layers)
-    c. token
-3. Contact Registry A to receive the layers for samalba/busybox (all of them to
-   the base image). Registry A is authoritative for “samalba/busybox” but keeps
-   a copy of all inherited layers and serves them all from the same location.
-4. Registry contacts Docker Hub to verify if token/user is allowed to download
-   images.
-5. Docker Hub returns true/false, letting the registry know if it should proceed
-   or error out.
-6. Get the payload for all layers.
-
-The goal of this document is to outline a way to eliminate steps 4 and 5 from
-the above process by using cryptographically signed tokens and by no longer
-requiring the client to authenticate each request with a username and password
-stored locally in plain text.
-
-The new registry workflow is more like this:
-
-![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360)
-
-1. Attempt to begin a push/pull operation with the registry.
-2. If the registry requires authorization it will return a `401 Unauthorized`
-   HTTP response with information on how to authenticate.
-3. The registry client makes a request to the authorization service for a
-   signed JSON Web Token.
-4. The authorization service returns a token.
-5. The client retries the original request with the token embedded in the
-   request header.
-6. The Registry authorizes the client and begins the push/pull session as
-   usual.
-
-## Requirements
-
-- Registry Clients capable of generating key pairs which can be used to
-  authenticate to an authorization server.
-- An authorization server capable of managing user accounts, their public keys,
-  and access controls to their resources hosted by any given service (such as
-  repositories in a Docker Registry).
-- A Docker Registry capable of trusting the authorization server to sign tokens
-  which clients can use for authorization and the ability to verify these
-  tokens for single use or for use during a sufficiently short period of time.
-
-## Authorization Server Endpoint Descriptions
-
-This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
-
-The described server is meant to serve as a user account and key manager and a
-centralized access control list for resources hosted by other services which
-wish to authenticate and manage authorizations using this service's accounts
-and their public keys.
-
-Such a service could be used by the official docker registry to authenticate
-clients and verify their authorization to docker image repositories.
-
-Docker will need to be updated to interact with an authorization server to get
-an authorization token.
-
-## How to authenticate
-
-Today, registry clients first contact the index to initiate a push or pull.
-For v2, clients should contact the registry first. If the registry server
-requires authentication it will return a `401 Unauthorized` response with a
-`WWW-Authenticate` header detailing how to authenticate to this registry.
-
-For example, say I (username `jlhawn`) am attempting to push an image to the
-repository `samalba/my-app`. For the registry to authorize this, I either need
-`push` access to the `samalba/my-app` repository or `push` access to the whole
-`samalba` namespace in general. The registry will first return this response:
-
-```
-HTTP/1.1 401 Unauthorized
-WWW-Authenticate: Bearer realm="https://auth.docker.com/v2/token/",service="registry.docker.com",scope="repository:samalba/my-app:push"
-```
-
-This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3).
-
-The client will then know to make a `GET` request to the URL
-`https://auth.docker.com/v2/token/` using the `service` and `scope` values from
-the `WWW-Authenticate` header.
-
-## Requesting a Token
-
-#### Query Parameters
-
-<dl>
-    <dt>
-        <code>service</code>
-    </dt>
-    <dd>
-        The name of the service which hosts the resource.
-    </dd>
-    <dt>
-        <code>scope</code>
-    </dt>
-    <dd>
-        The resource in question, formatted as one of the space-delimited
-        entries from the <code>scope</code> parameters from the <code>WWW-Authenticate</code> header
-        shown above. This query parameter should be specified multiple times if
-        there is more than one <code>scope</code> entry from the <code>WWW-Authenticate</code>
-        header. The above example would be specified as:
-        <code>scope=repository:samalba/my-app:push</code>.
-    </dd>
-    <dt>
-        <code>account</code>
-    </dt>
-    <dd>
-        The name of the account which the client is acting as. Optional if it
-        can be inferred from client authentication.
-    </dd>
-</dl>
-
-#### Description
-
-Requests an authorization token for access to a specific resource hosted by a
-specific service provider. Requires the client to authenticate either using a
-TLS client certificate or using basic authentication (or any other kind of
-digest/challenge/response authentication scheme if the client doesn't support
-TLS client certs). If the key in the client certificate is linked to an account
-then the token is issued for that account key. If the key in the certificate is
-linked to multiple accounts then the client must specify the `account` query
-parameter. The returned token is in JWT (JSON Web Token) format, signed using
-the authorization server's private key.
-
-#### Example
-
-For this example, the client makes an HTTP request to the following endpoint
-over TLS using a client certificate with the server being configured to allow a
-non-verified issuer during the handshake (i.e., a self-signed client cert is
-okay).
-
-```
-GET /v2/token/?service=registry.docker.com&scope=repository:samalba/my-app:push&account=jlhawn HTTP/1.1
-Host: auth.docker.com
-```
-
-The server first inspects the client certificate to extract the subject key and
-look up which account it is associated with. The client is now authenticated
-using that account.
-
-The server next searches its access control list for the account's access to
-the repository `samalba/my-app` hosted by the service `registry.docker.com`.
-
-The server will now construct a JSON Web Token to sign and return. A JSON Web
-Token has 3 main parts:
-
-1. Headers
-
-   The header of a JSON Web Token is a standard JOSE header. The "typ" field
-   will be "JWT" and it will also contain the "alg" field, which identifies the
-   signing algorithm used to produce the signature. It will also usually have
-   a "kid" field, the ID of the key which was used to sign the token.
-
-   Here is an example JOSE Header for a JSON Web Token (formatted with
-   whitespace for readability):
-
-   ```
-   {
-       "typ": "JWT",
-       "alg": "ES256",
-       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"
-   }
-   ```
-
-   It specifies that this object is a JSON Web Token signed with the key with
-   the given ID, using the Elliptic Curve signature algorithm with a SHA-256
-   hash.
-
-2. Claim Set
-
-   The Claim Set is a JSON struct containing these standard registered claim
-   name fields:
-
-    <dl>
-        <dt>
-            <code>iss</code> (Issuer)
-        </dt>
-        <dd>
-            The issuer of the token, typically the fqdn of the authorization
-            server.
-        </dd>
-        <dt>
-            <code>sub</code> (Subject)
-        </dt>
-        <dd>
-            The subject of the token; the id of the client which requested it.
-        </dd>
-        <dt>
-            <code>aud</code> (Audience)
-        </dt>
-        <dd>
-            The intended audience of the token; the id of the service which
-            will verify the token to authorize the client/subject.
-        </dd>
-        <dt>
-            <code>exp</code> (Expiration)
-        </dt>
-        <dd>
-            The token should only be considered valid up to this specified date
-            and time.
-        </dd>
-        <dt>
-            <code>nbf</code> (Not Before)
-        </dt>
-        <dd>
-            The token should not be considered valid before this specified date
-            and time.
-        </dd>
-        <dt>
-            <code>iat</code> (Issued At)
-        </dt>
-        <dd>
-            Specifies the date and time at which the authorization server
-            generated this token.
-        </dd>
-        <dt>
-            <code>jti</code> (JWT ID)
-        </dt>
-        <dd>
-            A unique identifier for this token. Can be used by the intended
-            audience to prevent replays of the token.
-        </dd>
-    </dl>
-
-    The Claim Set will also contain a private claim name unique to this
-    authorization server specification:
-
-    <dl>
-        <dt>
-            <code>access</code>
-        </dt>
-        <dd>
-            An array of access entry objects with the following fields:
-
-            <dl>
-                <dt>
-                    <code>type</code>
-                </dt>
-                <dd>
-                    The type of resource hosted by the service.
-                </dd>
-                <dt>
-                    <code>name</code>
-                </dt>
-                <dd>
-                    The name of the resource of the given type hosted by the
-                    service.
-                </dd>
-                <dt>
-                    <code>actions</code>
-                </dt>
-                <dd>
-                    An array of strings which give the actions authorized on
-                    this resource.
-                </dd>
-            </dl>
-        </dd>
-    </dl>
-
-    Here is an example of such a JWT Claim Set (formatted with whitespace for
-    readability):
-
-    ```
-    {
-        "iss": "auth.docker.com",
-        "sub": "jlhawn",
-        "aud": "registry.docker.com",
-        "exp": 1415387315,
-        "nbf": 1415387015,
-        "iat": 1415387015,
-        "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
-        "access": [
-            {
-                "type": "repository",
-                "name": "samalba/my-app",
-                "actions": [
-                    "push"
-                ]
-            }
-        ]
-    }
-    ```
-
-3. Signature
-
-   The authorization server will produce a JOSE header and Claim Set with no
-   extraneous whitespace, i.e., the JOSE Header from above would be
-
-   ```
-   {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
-   ```
-
-   and the Claim Set from above would be
-
-   ```
-   {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push"]}]}
-   ```
-
-   The UTF-8 representations of this JOSE header and Claim Set are then
-   URL-safe base64 encoded (sans trailing '=' padding), producing:
-
-   ```
-   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
-   ```
-
-   for the JOSE Header and
-
-   ```
-   eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
-   ```
-
-   for the Claim Set. These two are concatenated using a '.' character,
-   yielding the string:
-
-   ```
-   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
-   ```
-
-   This is then used as the payload for the `ES256` signature algorithm
-   specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA)
-   draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4).
-
-   This example signature will use the following ECDSA key for the server:
-
-   ```
-   {
-       "kty": "EC",
-       "crv": "P-256",
-       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
-       "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
-       "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
-       "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
-   }
-   ```
-
-   A resulting signature of the above payload using this key is:
-
-   ```
-   QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
-   ```
-
-   Concatenating all of these together with a `.` character gives the
-   resulting JWT:
-
-   ```
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w - ``` - -This can now be placed in an HTTP response and returned to the client to use to -authenticate to the audience service: - - -``` -HTTP/1.1 200 OK -Content-Type: application/json - -{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} -``` - -## Using the signed token - -Once the client has a token, it will try the registry request again with the -token placed in the HTTP `Authorization` header like so: - -``` -Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw -``` - -This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) - -## Verifying the token - -The registry must now verify the token presented by the user by inspecting the -claim set within. The registry will: - -- Ensure that the issuer (`iss` claim) is an authority it trusts. -- Ensure that the registry identifies as the audience (`aud` claim). -- Check that the current time is between the `nbf` and `exp` claim times. -- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has - not been seen before. - - To enforce this, the registry may keep a record of `jti`s it has seen for - up to the `exp` time of the token to prevent token replays. -- Check the `access` claim value and use the identified resources and the list - of actions authorized to determine whether the token grants the required - level of access for the operation the client is attempting to perform. -- Verify that the signature of the token is valid. - -At no point in this process should the registry need to call back to -the authorization server. If anything, it would only need to update a list of -trusted public keys for verifying token signatures or use a separate API -(still to be spec'd) to add/update resource records on the authorization -server. 
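To make the registry-side checklist concrete, here is a small Go sketch of the claim-set checks. It is illustrative only and not part of the vendored code: the type and function names are invented, and the ES256 signature verification against the trusted key identified by the JOSE header's "kid" is deliberately left out.

```go
package tokenverify

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"time"
)

// claimSet mirrors the claims described above; access entries carry the
// actions granted per resource.
type claimSet struct {
	Issuer     string `json:"iss"`
	Subject    string `json:"sub"`
	Audience   string `json:"aud"`
	Expiration int64  `json:"exp"`
	NotBefore  int64  `json:"nbf"`
	IssuedAt   int64  `json:"iat"`
	JWTID      string `json:"jti"`
	Access     []struct {
		Type    string   `json:"type"`
		Name    string   `json:"name"`
		Actions []string `json:"actions"`
	} `json:"access"`
}

// verifyClaims applies the checklist above, minus signature verification,
// which must also pass before any of these claims can be trusted.
func verifyClaims(token, trustedIssuer, registryService string, seenJTIs map[string]bool) (*claimSet, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return nil, errors.New("token must have header, claims and signature")
	}
	// Claims are the middle segment, URL-safe base64 without padding.
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return nil, err
	}
	var claims claimSet
	if err := json.Unmarshal(payload, &claims); err != nil {
		return nil, err
	}
	if claims.Issuer != trustedIssuer {
		return nil, fmt.Errorf("untrusted issuer %q", claims.Issuer)
	}
	if claims.Audience != registryService {
		return nil, errors.New("token not intended for this registry")
	}
	now := time.Now().Unix()
	if now < claims.NotBefore || now > claims.Expiration {
		return nil, errors.New("token outside its validity window")
	}
	// Optional single-use enforcement: remember jti values until they expire.
	if seenJTIs[claims.JWTID] {
		return nil, errors.New("token replayed")
	}
	seenJTIs[claims.JWTID] = true
	return &claims, nil
}
```

A production registry would resolve the "kid" from the JOSE header against its list of trusted public keys and verify the ES256 signature before performing (or at least before acting on) any of these claim checks.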
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/json.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/json.md
deleted file mode 100644
index 596b218f2781..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/spec/json.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# Docker Distribution JSON Canonicalization
-
-## Introduction
-
-To provide consistent content hashing of JSON objects throughout Docker
-Distribution APIs, a canonical JSON format has been defined. Adopting such a
-canonicalization also aids in caching JSON responses.
-
-## Rules
-
-Compliant JSON should conform to the following rules:
-
-1. All generated JSON should comply with [RFC
-   7159](http://www.ietf.org/rfc/rfc7159.txt).
-2. Resulting "JSON text" shall always be encoded in UTF-8.
-3. Unless a canonical key order is defined for a particular schema, object
-   keys shall always appear in lexically sorted order.
-4. All whitespace between tokens should be removed.
-5. No "trailing commas" are allowed in object or array definitions.
-
-## Examples
-
-The following is a simple example of a canonicalized JSON string:
-
-```json
-{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}
-```
-
-## Reference
-
-### Other Canonicalizations
-
-The OLPC project specifies [Canonical
-JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in
-[TUF](http://theupdateframework.com/), which may be used with other
-distribution-related protocols, this alternative format has been proposed in
-case the original source changes. Specifications complying with either this
-specification or an alternative should explicitly call out the
-canonicalization format. Except for key ordering, this specification is mostly
-compatible.
-
-### Go
-
-In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library
-will emit canonical JSON by default. Simply using `json.Marshal` will suffice
-in most cases:
-
-```go
-incoming := map[string]interface{}{
-    "asdf": 1,
-    "qwer": []interface{}{},
-    "zxcv": []interface{}{
-        map[string]interface{}{},
-        true,
-        int(1e9),
-        "tyui",
-    },
-}
-
-canonical, err := json.Marshal(incoming)
-if err != nil {
-    // ... handle error
-}
-```
-
-To apply canonical JSON format spacing to an existing serialized JSON buffer, one
-can use
-[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65)
-with the following arguments:
-
-```go
-incoming := getBytes()
-var canonical bytes.Buffer
-if err := json.Indent(&canonical, incoming, "", ""); err != nil {
-    // ... handle error
-}
-```
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/azure.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/azure.md
deleted file mode 100644
index f0fd296dd354..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/azure.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Docker Registry Microsoft Azure Blob Storage Driver
-
-
-An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage.
-
-## Parameters
-
-The following parameters must be used to authenticate and configure the storage driver (case-sensitive):
-
-* `accountname`: Name of the Azure Storage Account.
-* `accountkey`: Primary or Secondary Key for the Storage Account.
-* `container`: Name of the root storage container in which all registry data will be stored. Must comply with the storage container name [requirements][create-container-api].
-
-
-[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/
-[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/filesystem.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/filesystem.md
deleted file mode 100644
index ba3ea564284d..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/filesystem.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Docker-Registry Filesystem Storage Driver
-=========================================
-
-An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.
-
-## Parameters
-
-`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/tmp/registry/storage`.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/inmemory.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/inmemory.md
deleted file mode 100644
index 2447e2cad5bc..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/inmemory.md
+++ /dev/null
@@ -1,10 +0,0 @@
-Docker-Registry In-Memory Storage Driver
-=========================================
-
-An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage.
-
-**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing.
-
-## Parameters
-
-None
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/s3.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/s3.md
deleted file mode 100644
index fb0dd014ac14..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedriver/s3.md
+++ /dev/null
@@ -1,26 +0,0 @@
-Docker-Registry S3 Storage Driver
-=========================================
-
-An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.
-
-## Parameters
-
-`accesskey`: Your aws access key.
-
-`secretkey`: Your aws secret key.
-
-**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials.
-
-`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
-
-`bucket`: The name of your s3 bucket where you wish to store objects (needs to already be created prior to driver initialization).
-
-`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
-
-`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
-
-`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests.
This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false).
-
-`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to s3. The default is 10 MB. Keep in mind that the minimum part size for s3 is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to s3.
-
-`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedrivers.md b/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedrivers.md
deleted file mode 100644
index c230c11614d4..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/doc/storagedrivers.md
+++ /dev/null
@@ -1,50 +0,0 @@
-Docker-Registry Storage Driver
-==============================
-
-This document describes the registry storage driver model and implementation, and explains how to contribute new storage drivers.
-
-Provided Drivers
-================
-
-This storage driver package comes bundled with several drivers:
-
-- [inmemory](storagedriver/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
-- [filesystem](storagedriver/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
-- [s3](storagedriver/s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
-- [azure](storagedriver/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
-
-Storage Driver API
-==================
-
-The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
-
-Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
-
-Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process.
-
-Driver Selection and Configuration
-==================================
-
-The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
-
-Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map.
If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storage-\<driver name\>" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.
-
-Driver Contribution
-===================
-
-## Writing new storage drivers
-To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable.
-
-### In-process drivers
-Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.
-
-### Out-of-process drivers
-As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `storagedriver/filesystem/registry-storage-filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.
-
-Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver API compatibility at driver load-time.
-
-## Testing
-Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively.
-
-## Drivers written in other languages
-Although storage drivers are strongly recommended to be written in go for consistency, compile-time validation, and support, the IPC framework allows for a level of language-agnosticism. Non-go drivers must implement the storage driver protocol by mimicking StorageDriverServer in `storagedriver/ipc/server.go`. As the IPC framework is a layer on top of [docker/libchan](https://github.com/docker/libchan), this currently limits language support to Java via [ndeloof/jchan](https://github.com/ndeloof/jchan) and Javascript via [GraftJS/jschan](https://github.com/GraftJS/jschan), although contributions to the libchan project are welcome.
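The in-process factory flow described under "Driver Selection and Configuration" is easiest to see in code. The following sketch is not part of this tree's files; it simply re-registers the bundled inmemory driver under a new name for illustration, and it assumes the vendored factory API shown here, i.e. `factory.Register(name, StorageDriverFactory)` and `factory.Create(name, parameters)`.

```go
package main

import (
	"fmt"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

// exampleFactory satisfies factory.StorageDriverFactory. A real driver
// would construct itself from the parameters map here.
type exampleFactory struct{}

func (ef *exampleFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return inmemory.New(), nil
}

func main() {
	// Normally done in the driver package's init() so that importing the
	// package is enough to make the driver available by name.
	factory.Register("example-inmemory", &exampleFactory{})

	driver, err := factory.Create("example-inmemory", map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("constructed storage driver: %T\n", driver)
}
```

Registering in `init` is what lets the registry select drivers purely by configuration: the binary imports the driver packages it supports, and the configured name is resolved through the factory at startup.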
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go
new file mode 100644
index 000000000000..73fcc4535f8b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go
@@ -0,0 +1,37 @@
+package api
+
+import (
+	"errors"
+	"net/http"
+
+	"github.com/docker/distribution/health"
+)
+
+var (
+	updater = health.NewStatusUpdater()
+)
+
+// DownHandler sets the manual_http_status check to an error status,
+// taking the service out of rotation
+func DownHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method == "POST" {
+		updater.Update(errors.New("Manual Check"))
+	} else {
+		w.WriteHeader(http.StatusNotFound)
+	}
+}
+
+// UpHandler sets the manual_http_status check back to nil (healthy)
+func UpHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method == "POST" {
+		updater.Update(nil)
+	} else {
+		w.WriteHeader(http.StatusNotFound)
+	}
+}
+
+// init sets up the two endpoints to bring the service up and down
+func init() {
+	health.Register("manual_http_status", updater)
+	http.HandleFunc("/debug/health/down", DownHandler)
+	http.HandleFunc("/debug/health/up", UpHandler)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go
new file mode 100644
index 000000000000..ec82154f6ef2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go
@@ -0,0 +1,86 @@
+package api
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution/health"
+)
+
+// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint
+// /debug/health/down with METHOD GET returns a 404
+func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	DownHandler(recorder, req)
+
+	if recorder.Code != 404 {
+		t.Errorf("Did not get a 404.")
+	}
+}
+
+// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint
+// /debug/health/up with METHOD GET returns a 404
+func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	UpHandler(recorder, req)
+
+	if recorder.Code != 404 {
+		t.Errorf("Did not get a 404.")
+	}
+}
+
+// TestPOSTDownHandlerChangeStatus ensures that a POST to /debug/health/down
+// adds an error check, causing /debug/health to report 503
+// This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus
+func TestPOSTDownHandlerChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	DownHandler(recorder, req)
+
+	if recorder.Code != 200 {
+		t.Errorf("Did not get a 200.")
+	}
+
+	if len(health.CheckStatus()) != 1 {
+		t.Errorf("DownHandler didn't add an error check.")
+	}
+}
+
+// TestPOSTUpHandlerChangeStatus ensures that a POST to /debug/health/up
+// removes the manual error check, so /debug/health reports 200 again
+func TestPOSTUpHandlerChangeStatus(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	UpHandler(recorder, req)
+
+	if recorder.Code != 200 {
+		t.Errorf("Did not get a 200.")
+	}
+
+	if len(health.CheckStatus()) != 0 {
+		t.Errorf("UpHandler didn't remove the error check.")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go
new file mode 100644
index 000000000000..9de140107868
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go
@@ -0,0 +1,35 @@
+package checks
+
+import (
+	"errors"
+	"github.com/docker/distribution/health"
+	"net/http"
+	"os"
+	"strconv"
+)
+
+// FileChecker checks the existence of a file and returns an error
+// if the file exists, taking the application out of rotation
+func FileChecker(f string) health.Checker {
+	return health.CheckFunc(func() error {
+		if _, err := os.Stat(f); err == nil {
+			return errors.New("file exists")
+		}
+		return nil
+	})
+}
+
+// HTTPChecker does a HEAD request and verifies that the returned HTTP status
+// code is 200, taking the application out of rotation otherwise
+func HTTPChecker(r string) health.Checker {
+	return health.CheckFunc(func() error {
+		response, err := http.Head(r)
+		if err != nil {
+			return errors.New("error while checking: " + r)
+		}
+		if response.StatusCode != http.StatusOK {
+			return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode))
+		}
+		return nil
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go
new file mode 100644
index 000000000000..8faa32f7c022
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/doc.go
@@ -0,0 +1,130 @@
+// Package health provides a generic health checking framework.
+// The health package works expvar style. By importing the package the debug
+// server gets a "/debug/health" endpoint that returns the current
+// status of the application.
+// If there are no errors, "/debug/health" will return an HTTP 200 status,
+// together with an empty JSON reply "{}". If there are any checks
+// with errors, the JSON reply will include all the failed checks, and the
+// response will have an HTTP 503 status.
+//
+// A Check can be run either synchronously or asynchronously. We recommend
+// that most checks be registered as asynchronous checks, so a call to the
+// "/debug/health" endpoint always returns immediately. This pattern is
+// particularly useful for checks that verify upstream connectivity or
+// database status, since they might take a long time to return/timeout.
+//
+// Installing
+//
+// To install health, just import it in your application:
+//
+//   import "github.com/docker/distribution/health"
+//
+// You can also (optionally) import "health/api", which will add two convenience
+// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add
+// "manual" checks that allow the service to quickly be brought in/out of
+// rotation.
+//
+//   import _ "github.com/docker/distribution/registry/health/api"
+//
+//   # curl localhost:5001/debug/health
+//   {}
+//   # curl -X POST localhost:5001/debug/health/down
+//   # curl localhost:5001/debug/health
+//   {"manual_http_status":"Manual Check"}
+//
+// After importing these packages to your main application, you can start
+// registering checks.
+//
+// Registering Checks
+//
+// The recommended way of registering checks is using a periodic Check.
+// PeriodicChecks run on a certain schedule and asynchronously update the
+// status of the check. This allows "CheckStatus()" to return without blocking
+// on an expensive check.
+//
+// A trivial example of a check that runs every 5 seconds and shuts down our
+// server if the current minute is even could be added as follows:
+//
+//   func currentMinuteEvenCheck() error {
+//     m := time.Now().Minute()
+//     if m%2 == 0 {
+//       return errors.New("Current minute is even!")
+//     }
+//     return nil
+//   }
+//
+//   health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
+//
+// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to
+// implement the exact same check, but add a threshold of failures after which
+// the check will be unhealthy. This is particularly useful for flaky Checks,
+// ensuring some stability of the service when handling them.
+//
+//   health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
+//
+// The lowest-level way to interact with the health package is calling
+// "Register" directly. Register allows you to pass in an arbitrary string and
+// something that implements "Checker" and runs your check. If your method
+// returns a nil error, it is considered a healthy check, otherwise it
+// will make the health check endpoint "/debug/health" start returning a 503
+// and list the specific check that failed.
+//
+// Assuming you wish to register a method called "currentMinuteEvenCheck()
+// error" you could do that by doing:
+//
+//   health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
+//
+// CheckFunc is a convenience type that implements Checker.
+//
+// Another way of registering a check could be by using an anonymous function
+// and the convenience method RegisterFunc. An example that makes the status
+// endpoint always return an error:
+//
+//   health.RegisterFunc("my_check", func() error {
+//     return errors.New("This is an error!")
+//   })
+//
+// Examples
+//
+// You could also use the health checker mechanism to ensure your application
+// only comes up if certain conditions are met, or to allow the developer to
+// take the service out of rotation immediately. An example that checks
+// database connectivity and immediately takes the server out of rotation on
+// err:
+//
+//   updater := health.NewStatusUpdater()
+//   health.RegisterFunc("database_check", func() error {
+//     return updater.Check()
+//   })
+//
+//   conn, err := Connect(...) // database call here
+//   if err != nil {
+//     updater.Update(errors.New("Error connecting to the database: " + err.Error()))
+//   }
+//
+// You can also use the predefined Checkers that come included with the health
+// package. First, import the checks:
+//
+//   import "github.com/docker/distribution/health/checks"
+//
+// After that you can make use of any of the provided checks. An example of
+// using a `FileChecker` to take the application out of rotation if a certain
+// file exists can be done as follows:
+//
+//   health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
+//
+// After registering the check, it is trivial to take an application out of
+// rotation from the console:
+//
+//   # curl localhost:5001/debug/health
+//   {}
+//   # touch /tmp/disable
+//   # curl localhost:5001/debug/health
+//   {"fileChecker":"file exists"}
+//
+// You could also test the connectivity to a downstream service by using a
+// "HTTPChecker", but ensure that you only mark the test unhealthy if there
+// are a minimum of two failures in a row:
+//
+//   health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
+package health
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go
new file mode 100644
index 000000000000..512539c1c15b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go
@@ -0,0 +1,212 @@
+package health
+
+import (
+	"encoding/json"
+	"net/http"
+	"sync"
+	"time"
+)
+
+var (
+	mutex            sync.RWMutex
+	registeredChecks = make(map[string]Checker)
+)
+
+// Checker is the interface for a Health Checker
+type Checker interface {
+	// Check returns nil if the service is okay.
+	Check() error
+}
+
+// CheckFunc is a convenience type to create functions that implement
+// the Checker interface
+type CheckFunc func() error
+
+// Check implements the Checker interface to allow for any func() error method
+// to be passed as a Checker
+func (cf CheckFunc) Check() error {
+	return cf()
+}
+
+// Updater implements a health check that is explicitly set.
+type Updater interface {
+	Checker
+
+	// Update updates the current status of the health check.
+	Update(status error)
+}
+
+// updater implements Checker and Updater, providing an asynchronous Update
+// method.
+// This allows us to have a Checker that returns the Check() call immediately,
+// without blocking on a potentially expensive check.
+type updater struct {
+	mu     sync.Mutex
+	status error
+}
+
+// Check implements the Checker interface
+func (u *updater) Check() error {
+	u.mu.Lock()
+	defer u.mu.Unlock()
+
+	return u.status
+}
+
+// Update implements the Updater interface, allowing asynchronous access to
+// the status of a Checker.
+func (u *updater) Update(status error) {
+	u.mu.Lock()
+	defer u.mu.Unlock()
+
+	u.status = status
+}
+
+// NewStatusUpdater returns a new updater
+func NewStatusUpdater() Updater {
+	return &updater{}
+}
+
+// thresholdUpdater implements Checker and Updater, providing an asynchronous Update
+// method.
+// This allows us to have a Checker that returns the Check() call immediately,
+// without blocking on a potentially expensive check.
+type thresholdUpdater struct {
+	mu        sync.Mutex
+	status    error
+	threshold int
+	count     int
+}
+
+// Check implements the Checker interface
+func (tu *thresholdUpdater) Check() error {
+	tu.mu.Lock()
+	defer tu.mu.Unlock()
+
+	if tu.count >= tu.threshold {
+		return tu.status
+	}
+
+	return nil
+}
+
+// Update implements the Updater interface, allowing asynchronous
+// access to the status of a Checker.
+func (tu *thresholdUpdater) Update(status error) {
+	tu.mu.Lock()
+	defer tu.mu.Unlock()
+
+	if status == nil {
+		tu.count = 0
+	} else if tu.count < tu.threshold {
+		tu.count++
+	}
+
+	tu.status = status
+}
+
+// NewThresholdStatusUpdater returns a new thresholdUpdater
+func NewThresholdStatusUpdater(t int) Updater {
+	return &thresholdUpdater{threshold: t}
+}
+
+// PeriodicChecker wraps an updater to provide a periodic checker
+func PeriodicChecker(check Checker, period time.Duration) Checker {
+	u := NewStatusUpdater()
+	go func() {
+		t := time.NewTicker(period)
+		for {
+			<-t.C
+			u.Update(check.Check())
+		}
+	}()
+
+	return u
+}
+
+// PeriodicThresholdChecker wraps an updater to provide a periodic checker that
+// uses a threshold before it changes status
+func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker {
+	tu := NewThresholdStatusUpdater(threshold)
+	go func() {
+		t := time.NewTicker(period)
+		for {
+			<-t.C
+			tu.Update(check.Check())
+		}
+	}()
+
+	return tu
+}
+
+// CheckStatus returns a map with all the current health check errors
+func CheckStatus() map[string]string {
+	mutex.RLock()
+	defer mutex.RUnlock()
+	statusKeys := make(map[string]string)
+	for k, v := range registeredChecks {
+		err := v.Check()
+		if err != nil {
+			statusKeys[k] = err.Error()
+		}
+	}
+
+	return statusKeys
+}
+
+// Register associates the checker with the provided name; registering a
+// checker under a name that is already in use panics.
+func Register(name string, check Checker) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	_, ok := registeredChecks[name]
+	if ok {
+		panic("Check already exists: " + name)
+	}
+	registeredChecks[name] = check
+}
+
+// RegisterFunc allows the convenience of registering a checker directly
+// from an arbitrary func() error
+func RegisterFunc(name string, check func() error) {
+	Register(name, CheckFunc(check))
+}
+
+// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
+// from an arbitrary func() error
+func RegisterPeriodicFunc(name string, check func() error, period time.Duration) {
+	Register(name, PeriodicChecker(CheckFunc(check), period))
+}
+
+// RegisterPeriodicThresholdFunc allows the convenience of registering a
+// PeriodicChecker from an arbitrary func() error
+func RegisterPeriodicThresholdFunc(name string, check func() error, period time.Duration, threshold int) {
+	Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold))
+}
+
+// StatusHandler returns a JSON blob with all the currently registered Health Checks
+// and their corresponding status.
+// Returns 503 if any error status exists, 200 otherwise
+func StatusHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method == "GET" {
+		w.Header().Set("Content-Type", "application/json; charset=utf-8")
+		checksStatus := CheckStatus()
+		// If there is an error, return 503
+		if len(checksStatus) != 0 {
+			w.WriteHeader(http.StatusServiceUnavailable)
+		}
+		err := json.NewEncoder(w).Encode(checksStatus)
+
+		// Encoding the JSON failed; return a generic error message
+		if err != nil {
+			w.Write([]byte("{server_error: 'Could not parse error message'}"))
+		}
+	} else {
+		w.WriteHeader(http.StatusNotFound)
+	}
+}
+
+// init registers the global /debug/health API endpoint
+func init() {
+	http.HandleFunc("/debug/health", StatusHandler)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go
new file mode 100644
index 000000000000..7989f0b28b0f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go
@@ -0,0 +1,47 @@
+package health
+
+import (
+	"errors"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+// TestReturns200IfThereAreNoChecks ensures that the result code of the health
+// endpoint is 200 if there are no currently registered checks.
+func TestReturns200IfThereAreNoChecks(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	StatusHandler(recorder, req)
+
+	if recorder.Code != 200 {
+		t.Errorf("Did not get a 200.")
+	}
+}
+
+// TestReturns503IfThereAreErrorChecks ensures that the result code of the
+// health endpoint is 503 if there are health checks with errors
+func TestReturns503IfThereAreErrorChecks(t *testing.T) {
+	recorder := httptest.NewRecorder()
+
+	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil)
+	if err != nil {
+		t.Errorf("Failed to create request.")
+	}
+
+	// Create a manual error
+	Register("some_check", CheckFunc(func() error {
+		return errors.New("This Check did not succeed")
+	}))
+
+	StatusHandler(recorder, req)
+
+	if recorder.Code != 503 {
+		t.Errorf("Did not get a 503.")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go
index 21d2105dea16..baa90a5bf978 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go
@@ -6,6 +6,7 @@ import (
 
 	"code.google.com/p/go-uuid/uuid"
 	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"
 )
@@ -45,7 +46,7 @@ func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request Re
 func NewRequestRecord(id string, r *http.Request) RequestRecord {
 	return RequestRecord{
 		ID:        id,
-		Addr:      r.RemoteAddr,
+		Addr:      context.RemoteAddr(r),
 		Host:      r.Host,
 		Method:    r.Method,
 		UserAgent: r.UserAgent(),
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go
index 34f167ff7450..db16ad36e1c1 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"
 	"github.com/docker/distribution/registry/storage"
+	"github.com/docker/distribution/registry/storage/cache"
 	"github.com/docker/distribution/registry/storage/driver/inmemory"
 	"github.com/docker/distribution/testutil"
 	"github.com/docker/libtrust"
@@ -16,7 +17,7 @@ )
 
 func TestListener(t *testing.T) {
-	registry := storage.NewRegistryWithDriver(inmemory.New())
+	registry := storage.NewRegistryWithDriver(inmemory.New(), cache.NewInMemoryLayerInfoCache())
 	tl := &testListener{
 		ops: make(map[string]int),
 	}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/open-design/MANIFESTO.md b/Godeps/_workspace/src/github.com/docker/distribution/open-design/MANIFESTO.md
deleted file mode 100644
index f3cd03da8750..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/open-design/MANIFESTO.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# The "Distribution" project
-
-## What is this
-
-This is the part of the Docker project, or "primitive", that handles the "distribution" of images.
-
-### Punchline
-
-Pack. Sign. Ship. Store. Deliver. Verify.
-
-### Technical scope
-
-Distribution has tight relations with:
-
- * libtrust, providing cryptographic primitives to handle image signing and verification
- * image format, as transferred over the wire
- * docker-registry, the server side component that allows storage and retrieval of packed images
- * authentication and key management APIs, that are used to verify images and access storage services
- * PKI infrastructure
- * docker "pull/push client" code gluing all this together - network communication code, tarsum, etc
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/open-design/ROADMAP.md b/Godeps/_workspace/src/github.com/docker/distribution/open-design/ROADMAP.md
deleted file mode 100644
index 54e244ad3bf3..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/open-design/ROADMAP.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Roadmap
-
-## 11/24/2014: alpha
-
-Design and code:
-
-- implements a basic configuration loading mechanism: https://github.com/docker/docker-registry/issues/646
-- storage API is frozen, implemented and used: https://github.com/docker/docker-registry/issues/616
-- REST API defined and partly implemented: https://github.com/docker/docker-registry/issues/634
-- basic logging: https://github.com/docker/docker-registry/issues/635
-- auth design is frozen: https://github.com/docker/docker-registry/issues/623
-
-Environment:
-
-- some good practices are in place and documented: https://github.com/docker/docker-registry/issues/657
-
-## 12/22/2014: beta
-
-Design and code:
-
-- feature freeze
-- mirroring defined: https://github.com/docker/docker-registry/issues/658
-- extension model defined: https://github.com/docker/docker-registry/issues/613
-
-Environment:
-
-- doc-driven approach: https://github.com/docker/docker-registry/issues/627
-
-## 01/12/2015: RC
-
-Design and code:
-
-- third party drivers and extensions
-- basic search extension
-- third-party layers garbage collection scripts
-- healthcheck endpoints: https://github.com/docker/docker-registry/issues/656
-- bugnsnag/new-relic support: https://github.com/docker/docker-registry/issues/680
-
-Environment:
-
-- exhaustive test-cases
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/open-design/specs/TEMPLATE.md b/Godeps/_workspace/src/github.com/docker/distribution/open-design/specs/TEMPLATE.md
deleted file mode 100644
index a87ea61e9111..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/open-design/specs/TEMPLATE.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# DEP #X: Awesome proposal
-
-## Scope
-
-This is related to "Foo" (eg: authentication/storage/extension/...).
-
-## Abstract
-
-This proposal suggests adding support for "bar".
- -## User stories - -"I'm a Hub user, and 'bar' allows me to do baz1" - -"I'm a FOSS user running my private registry and 'bar' allows me to do baz2" - -"I'm a company running the registry and 'bar' allows me to do baz3" - -## Technology pre-requisites - -'bar' can be implemented using: - - * foobar approach - * barfoo concurrent approach - -## Dependencies - -Project depends on baz to be completed (eg: docker engine support, or another registry proposal). - -## Technical proposal - -We are going to do foofoo alongside with some chunks of barbaz. - -## Roadmap - - * YYYY-MM-DD: proposal submitted - * YYYY-MM-DD: proposal reviewed and updated - * YYYY-MM-DD: implementation started (WIP PR) - * YYYY-MM-DD: implementation complete ready for thorough review - * YYYY-MM-DD: final PR version - * YYYY-MM-DD: implementation merged - -## Editors - -Editors: - - * my Company, or maybe just me - -Implementors: - - * me and my buddies - * another team working on a different approach \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry.go index 029457088474..c5d84a0faca2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry.go @@ -10,8 +10,31 @@ import ( "golang.org/x/net/context" ) -// Registry represents a collection of repositories, addressable by name. -type Registry interface { +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + // Repository should return a reference to the named repository. The // registry may or may not have the repository but should always return a // reference. @@ -33,6 +56,10 @@ type Repository interface { Signatures() SignatureService } +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships. + // ManifestService provides operations on image manifests. type ManifestService interface { // Exists returns true if the manifest exists. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go index 5f091bbc927b..833bff8b2403 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -28,7 +28,7 @@ var ( Name: "uuid", Type: "opaque", Required: true, - Description: `A uuid identifying the upload. This field can accept almost anything.`, + Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", } digestPathParameter = ParameterDescriptor{ @@ -985,7 +985,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}", + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ @@ -1190,9 +1190,10 @@ var routeDescriptors = []RouteDescriptor{ StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { - Name: "Location", - Type: "url", - Format: "", + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", }, { Name: "Content-Range", @@ -1200,12 +1201,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "-", Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, + contentLengthZeroHeader, digestHeader, }, }, diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go index afab71fce0d7..fb268336f96f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -98,6 +98,7 @@ func TestRouter(t *testing.T) { }, }, { + // support uuid proper RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", Vars: map[string]string{ @@ -113,6 +114,21 @@ func TestRouter(t *testing.T) { "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", }, }, + { + // supports urlsafe base64 + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + }, + }, + { + // does not match + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", + StatusCode: http.StatusNotFound, + }, { // Check ambiguity: ensure we can distinguish between tags for // "foo/bar/image/image" and image for "foo/bar/image" with tag diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go index 134b0ae55cd1..39318d1a3978 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go @@ -66,7 +66,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut return nil, &challenge } - return context.WithValue(ctx, "auth.user", auth.UserInfo{Name: "silly"}), nil + 
return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil } type challenge struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go index 5049dae35b20..1c01e42ea16e 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go @@ -1,3 +1,3 @@ // Package registry is a placeholder package for registry interface -// destinations and utilities. +// definitions and utilities. package registry diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go index 1b5effbc871c..28940c8e1d40 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go @@ -1,12 +1,14 @@ package handlers import ( + "expvar" "fmt" + "math/rand" "net" "net/http" "os" + "time" - "code.google.com/p/go-uuid/uuid" "github.com/docker/distribution" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" @@ -16,9 +18,11 @@ import ( registrymiddleware "github.com/docker/distribution/registry/middleware/registry" repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" "golang.org/x/net/context" ) @@ -28,15 +32,12 @@ import ( // fields should be protected. type App struct { context.Context - Config configuration.Configuration - // InstanceID is a unique id assigned to the application on each creation. - // Provides information in the logs and context to identify restarts. - InstanceID string + Config configuration.Configuration router *mux.Router // main application router, configured with dispatchers driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry distribution.Registry // registry is the primary registry backend for the app instance. + registry distribution.Namespace // registry is the primary registry backend for the app instance. accessController auth.AccessController // main access controller for application // events contains notification related configuration. @@ -44,17 +45,8 @@ type App struct { sink notifications.Sink source notifications.SourceRecord } -} - -// Value intercepts calls context.Context.Value, returning the current app id, -// if requested. -func (app *App) Value(key interface{}) interface{} { - switch key { - case "app.id": - return app.InstanceID - } - return app.Context.Value(key) + redis *redis.Pool } // NewApp takes a configuration and returns a configured app, ready to serve @@ -62,13 +54,12 @@ func (app *App) Value(key interface{}) interface{} { // handlers accordingly. 
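The constructor below wires together the storage driver, middleware, events, redis, and the new layerinfo cache. For orientation, the core registry construction can be reproduced standalone with the new two-argument storage.NewRegistryWithDriver; a minimal sketch, assuming only the packages vendored in this diff (in-memory driver and cache) and eliding error handling:

package main

import (
	"golang.org/x/net/context"

	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	// Mirrors the "inmemory" layerinfo branch of NewApp: an in-memory
	// driver wrapped with an in-memory layerinfo cache.
	driver := inmemory.New()
	registry := storage.NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache())

	// Repositories are then resolved through the Namespace interface.
	repo, err := registry.Repository(context.Background(), "foo/bar")
	if err != nil {
		panic(err)
	}
	_ = repo
}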
func NewApp(ctx context.Context, configuration configuration.Configuration) *App { app := &App{ - Config: configuration, - Context: ctx, - InstanceID: uuid.New(), - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), + Config: configuration, + Context: ctx, + router: v2.RouterWithPrefix(configuration.HTTP.Prefix), } - app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "app.id")) + app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) // Register the handler dispatchers. app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { @@ -89,14 +80,41 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App // a health check. panic(err) } + + startUploadPurger(app.driver, ctxu.GetLogger(app)) + app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) if err != nil { panic(err) } app.configureEvents(&configuration) + app.configureRedis(&configuration) + + // configure storage caches + if cc, ok := configuration.Storage["cache"]; ok { + switch cc["layerinfo"] { + case "redis": + if app.redis == nil { + panic("redis configuration required to use the redis layerinfo cache") + } + app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis)) + ctxu.GetLogger(app).Infof("using redis layerinfo cache") + case "inmemory": + app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache()) + ctxu.GetLogger(app).Infof("using inmemory layerinfo cache") + default: + if cc["layerinfo"] != "" { + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) + } + } + } + + if app.registry == nil { + // configure the registry if no cache section is available. + app.registry = storage.NewRegistryWithDriver(app.driver, nil) + } - app.registry = storage.NewRegistryWithDriver(app.driver) app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) if err != nil { panic(err) @@ -170,13 +188,110 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { app.events.source = notifications.SourceRecord{ Addr: hostname, - InstanceID: app.InstanceID, + InstanceID: ctxu.GetStringValue(app, "instance.id"), + } +} + +func (app *App) configureRedis(configuration *configuration.Configuration) { + if configuration.Redis.Addr == "" { + ctxu.GetLogger(app).Infof("redis not configured") + return + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing.
+ ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) + + done := func(err error) { + logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", + ctxu.Since(ctx, "redis.connect.startedat")) + if err != nil { + logger.Errorf("redis: error connecting: %v", err) + } else { + logger.Infof("redis: connect %v", configuration.Redis.Addr) + } + } + + conn, err := redis.DialTimeout("tcp", + configuration.Redis.Addr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) + if err != nil { + ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", + configuration.Redis.Addr, err) + done(err) + return nil, err + } + + // authorize the connection + if configuration.Redis.Password != "" { + if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + // select the database to use + if configuration.Redis.DB != 0 { + if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + done(nil) + return conn, nil + }, + MaxIdle: configuration.Redis.Pool.MaxIdle, + MaxActive: configuration.Redis.Pool.MaxActive, + IdleTimeout: configuration.Redis.Pool.IdleTimeout, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + // TODO(stevvooe): We can probably do something more interesting + // here with the health package. + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not available, proceed without cache. + } + + app.redis = pool + + // setup expvar + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") } + + registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { + return map[string]interface{}{ + "Config": configuration.Redis, + "Active": app.redis.ActiveCount(), + } + })) } func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. + // Instantiate an http context here so we can track the error codes + // returned by the request router. + ctx := defaultContextManager.context(app, w, r) + defer func() { + ctxu.GetResponseLogger(ctx).Infof("response completed") + }() + defer defaultContextManager.release(ctx) + + // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. + var err error + w, err = ctxu.GetResponseWriter(ctx) + if err != nil { + ctxu.GetLogger(ctx).Warnf("response writer not found in context") + } + // Set a header with the Docker Distribution API Version for all responses. w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") app.router.ServeHTTP(w, r) @@ -191,43 +306,20 @@ type dispatchFunc func(ctx *Context, r *http.Request) http.Handler // TODO(stevvooe): dispatchers should probably have some validation error // chain with proper error reporting. -// singleStatusResponseWriter only allows the first status to be written to be -// the valid request status. The current use case of this class should be -// factored out.
-type singleStatusResponseWriter struct { - http.ResponseWriter - status int -} - -func (ssrw *singleStatusResponseWriter) WriteHeader(status int) { - if ssrw.status != 0 { - return - } - ssrw.status = status - ssrw.ResponseWriter.WriteHeader(status) -} - -func (ssrw *singleStatusResponseWriter) Flush() { - if flusher, ok := ssrw.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - // dispatcher returns a handler that constructs a request specific context and // handler, using the dispatch factory function. func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { context := app.context(w, r) - defer func() { - ctxu.GetResponseLogger(context).Infof("response completed") - }() - if err := app.authorized(w, r, context); err != nil { ctxu.GetLogger(context).Errorf("error authorizing context: %v", err) return } + // Add username to request logging + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) + if app.nameRequired(r) { repository, err := app.registry.Repository(context, getName(context)) @@ -261,16 +353,16 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } } - handler := dispatch(context, r) - - ssrw := &singleStatusResponseWriter{ResponseWriter: w} - handler.ServeHTTP(ssrw, r) + dispatch(context, r).ServeHTTP(w, r) // Automated error response handling here. Handlers may return their // own errors if they need different behavior (such as range errors // for layer upload). if context.Errors.Len() > 0 { - if ssrw.status == 0 { + if context.Value("http.response.status") == 0 { + // TODO(stevvooe): Getting this value from the context is a + // bit of a hack. We can further address with some of our + // future refactoring. w.WriteHeader(http.StatusBadRequest) } serveJSON(w, context.Errors) @@ -281,10 +373,8 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // context constructs the context object for the application. This should only // be called once per request. func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := ctxu.WithRequest(app, r) - ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx := defaultContextManager.context(app, w, r) ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "vars.name", "vars.reference", @@ -360,7 +450,6 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // should be replaced by another, rather than replacing the context on a // mutable object.
context.Context = ctx - return nil } @@ -429,7 +518,7 @@ func appendAccessRecords(records []auth.Access, method string, repo string) []au } // applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(registry distribution.Registry, middlewares []configuration.Middleware) (distribution.Registry, error) { +func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { for _, mw := range middlewares { rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) if err != nil { @@ -464,3 +553,27 @@ func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []co } return driver, nil } + +// startUploadPurger schedules a goroutine which will periodically +// check upload directories for old files and delete them +func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger) { + rand.Seed(time.Now().Unix()) + jitter := time.Duration(rand.Int()%60) * time.Minute + + // Start with reasonable defaults + // TODO:(richardscothern) make configurable + purgeAge := time.Duration(7 * 24 * time.Hour) + timeBetweenPurges := time.Duration(1 * 24 * time.Hour) + + go func() { + log.Infof("Starting upload purge in %s", jitter) + time.Sleep(jitter) + + for { + storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAge), true) + log.Infof("Starting upload purge in %s", timeBetweenPurges) + time.Sleep(timeBetweenPurges) + } + }() + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go index cd515dd0c12e..d0b9174d47ac 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go @@ -13,6 +13,7 @@ import ( "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/driver/inmemory" "golang.org/x/net/context" ) @@ -28,7 +29,7 @@ func TestAppDispatcher(t *testing.T) { Context: context.Background(), router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(driver), + registry: storage.NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()), } server := httptest.NewServer(app) router := v2.Router() diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go index 5496a7941c65..0df5534682f8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go @@ -3,6 +3,7 @@ package handlers import ( "fmt" "net/http" + "sync" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" @@ -88,3 +89,62 @@ func getUserName(ctx context.Context, r *http.Request) string { return username } + +// contextManager allows us to associate net/context.Context instances with a +// request, based on the memory identity of http.Request. This prepares http- +// level context, which is not application specific. If this is called, +// (*contextManager).release must be called on the context when the request is +// completed. 
+// +// Providing this circumvents a lot of necessity for dispatchers with the +// benefit of instantiating the request context much earlier. +// +// TODO(stevvooe): Consider making this facility a part of the context package. +type contextManager struct { + contexts map[*http.Request]context.Context + mu sync.Mutex +} + +// defaultContextManager is just a global instance to register request contexts. +var defaultContextManager = newContextManager() + +func newContextManager() *contextManager { + return &contextManager{ + contexts: make(map[*http.Request]context.Context), + } +} + +// context either returns a new context or looks it up in the manager. +func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { + cm.mu.Lock() + defer cm.mu.Unlock() + + ctx, ok := cm.contexts[r] + if ok { + return ctx + } + + if parent == nil { + parent = ctxu.Background() + } + + ctx = ctxu.WithRequest(parent, r) + ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) + cm.contexts[r] = ctx + + return ctx +} + +// release frees any resources associated with the request. +func (cm *contextManager) release(ctx context.Context) { + cm.mu.Lock() + defer cm.mu.Unlock() + + r, err := ctxu.GetRequest(ctx) + if err != nil { + ctxu.GetLogger(ctx).Errorf("no request found in context during release") + return + } + delete(cm.contexts, r) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go index d3e88810de27..048603b877e2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go @@ -8,7 +8,7 @@ import ( // InitFunc is the type of a RegistryMiddleware factory function and is // used to register the constructor for different RegistryMiddleware backends. -type InitFunc func(registry distribution.Registry, options map[string]interface{}) (distribution.Registry, error) +type InitFunc func(registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) var middlewares map[string]InitFunc @@ -28,7 +28,7 @@ func Register(name string, initFunc InitFunc) error { } // Get constructs a RegistryMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, registry distribution.Registry) (distribution.Registry, error) { +func Get(name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { return initFunc(registry, options) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go index 975df19f964c..8bab2f5e1d76 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go @@ -18,8 +18,9 @@ import ( // abstraction, providing utility methods that support creating and traversing // backend links. type blobStore struct { - *registry - ctx context.Context + driver storagedriver.StorageDriver + pm *pathMapper + ctx context.Context } // exists reports whether or not the path exists.
If the driver returns error diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 000000000000..a21cefd5745e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,98 @@ +// Package cache provides facilities to speed up access to the storage +// backend. Typically cache implementations deal with internal implementation +// details at the backend level, rather than generalized caches for +// distribution related interfaces. In other words, unless the cache is +// specific to the storage package, it belongs in another package. +package cache + +import ( + "fmt" + + "github.com/docker/distribution/digest" + "golang.org/x/net/context" +) + +// ErrNotFound is returned when a meta item is not found. +var ErrNotFound = fmt.Errorf("not found") + +// LayerMeta describes the backend location and length of layer data. +type LayerMeta struct { + Path string + Length int64 +} + +// LayerInfoCache is a driver-aware cache of layer metadata. Basically, it +// provides a fast cache for checks against repository metadata, avoiding +// round trips to backend storage. Note that this is different from a pure +// layer cache, which would also provide access to the backing data. Such +// a cache should be implemented as a middleware, rather than integrated with +// the storage backend. +// +// Note that most implementations rely on the caller to do strict checks on +// repo and dgst arguments, since these are mostly used behind existing +// implementations. +type LayerInfoCache interface { + // Contains returns true if the repository with name contains the layer. + Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) + + // Add includes the layer in the given repository cache. + Add(ctx context.Context, repo string, dgst digest.Digest) error + + // Meta provides the location of the layer on the backend and its size. Membership of a + // repository should be tested before using the result, if required. + Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) + + // SetMeta sets the meta data for the given layer. + SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error +} + +// base implements common checks between cache implementations. Note that +// these are not full checks of input, since that should be done by the +// caller.
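Read together, the four methods imply a check-then-populate flow for callers such as the layer cache added later in this patch. A hedged sketch of that flow, written against the interface above (it assumes this package's existing imports; the repository name, digest, and meta values are purely illustrative):

func exampleFlow(ctx context.Context, lic LayerInfoCache) (LayerMeta, error) {
	repo := "foo/bar"                   // illustrative repository name
	dgst := digest.Digest("sha256:abc") // illustrative digest, not validated here

	ok, err := lic.Contains(ctx, repo, dgst)
	if err != nil {
		return LayerMeta{}, err
	}

	if !ok {
		// Miss: the caller consults backend storage, then records both
		// membership and location so the next lookup is cheap.
		if err := lic.Add(ctx, repo, dgst); err != nil {
			return LayerMeta{}, err
		}
		if err := lic.SetMeta(ctx, dgst, LayerMeta{Path: "/backend/path", Length: 20}); err != nil {
			return LayerMeta{}, err
		}
	}

	return lic.Meta(ctx, dgst)
}

The base type that follows enforces the empty-name and empty-digest checks that this flow relies on.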
+type base struct { + LayerInfoCache +} + +func (b *base) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { + if repo == "" { + return false, fmt.Errorf("cache: cannot check for empty repository name") + } + + if dgst == "" { + return false, fmt.Errorf("cache: cannot check for empty digests") + } + + return b.LayerInfoCache.Contains(ctx, repo, dgst) +} + +func (b *base) Add(ctx context.Context, repo string, dgst digest.Digest) error { + if repo == "" { + return fmt.Errorf("cache: cannot add empty repository name") + } + + if dgst == "" { + return fmt.Errorf("cache: cannot add empty digest") + } + + return b.LayerInfoCache.Add(ctx, repo, dgst) +} + +func (b *base) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { + if dgst == "" { + return LayerMeta{}, fmt.Errorf("cache: cannot get meta for empty digest") + } + + return b.LayerInfoCache.Meta(ctx, dgst) +} + +func (b *base) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { + if dgst == "" { + return fmt.Errorf("cache: cannot set meta for empty digest") + } + + if meta.Path == "" { + return fmt.Errorf("cache: cannot set empty path for meta") + } + + return b.LayerInfoCache.SetMeta(ctx, dgst, meta) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache_test.go new file mode 100644 index 000000000000..48cef955ec5f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache_test.go @@ -0,0 +1,86 @@ +package cache + +import ( + "testing" + + "golang.org/x/net/context" +) + +// checkLayerInfoCache takes a cache implementation through a common set of +// operations. If adding new tests, please add them here so new +// implementations get the benefit. 
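New implementations pick up the whole shared suite with a one-line test, as the memory and redis tests later in this patch do. A sketch for a hypothetical constructor NewMyLayerInfoCache:

func TestMyLayerInfoCache(t *testing.T) {
	// NewMyLayerInfoCache is hypothetical; substitute any LayerInfoCache
	// constructor to run it through the shared checks below.
	checkLayerInfoCache(t, NewMyLayerInfoCache())
}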
+func checkLayerInfoCache(t *testing.T, lic LayerInfoCache) { + ctx := context.Background() + + exists, err := lic.Contains(ctx, "", "fake:abc") + if err == nil { + t.Fatalf("expected error checking for cache item with empty repo") + } + + exists, err = lic.Contains(ctx, "foo/bar", "") + if err == nil { + t.Fatalf("expected error checking for cache item with empty digest") + } + + exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") + if err != nil { + t.Fatalf("unexpected error checking for cache item: %v", err) + } + + if exists { + t.Fatalf("item should not exist") + } + + if err := lic.Add(ctx, "", "fake:abc"); err == nil { + t.Fatalf("expected error adding cache item with empty name") + } + + if err := lic.Add(ctx, "foo/bar", ""); err == nil { + t.Fatalf("expected error adding cache item with empty digest") + } + + if err := lic.Add(ctx, "foo/bar", "fake:abc"); err != nil { + t.Fatalf("unexpected error adding item: %v", err) + } + + exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") + if err != nil { + t.Fatalf("unexpected error checking for cache item: %v", err) + } + + if !exists { + t.Fatalf("item should exist") + } + + _, err = lic.Meta(ctx, "") + if err == nil || err == ErrNotFound { + t.Fatalf("expected error getting meta for cache item with empty digest") + } + + _, err = lic.Meta(ctx, "fake:abc") + if err != ErrNotFound { + t.Fatalf("expected ErrNotFound getting meta for unknown cache item") + } + + if err = lic.SetMeta(ctx, "", LayerMeta{}); err == nil { + t.Fatalf("expected error setting meta for cache item with empty digest") + } + + if err = lic.SetMeta(ctx, "foo/bar", LayerMeta{}); err == nil { + t.Fatalf("expected error setting meta for cache item with empty meta") + } + + expected := LayerMeta{Path: "/foo/bar", Length: 20} + if err := lic.SetMeta(ctx, "foo/bar", expected); err != nil { + t.Fatalf("unexpected error setting meta: %v", err) + } + + meta, err := lic.Meta(ctx, "foo/bar") + if err != nil { + t.Fatalf("unexpected error getting meta: %v", err) + } + + if meta != expected { + t.Fatalf("retrieved meta data did not match: %v != %v", meta, expected) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go new file mode 100644 index 000000000000..6d949792502c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go @@ -0,0 +1,63 @@ +package cache + +import ( + "github.com/docker/distribution/digest" + "golang.org/x/net/context" +) + +// inmemoryLayerInfoCache is a map-based implementation of LayerInfoCache. +type inmemoryLayerInfoCache struct { + membership map[string]map[digest.Digest]struct{} + meta map[digest.Digest]LayerMeta +} + +// NewInMemoryLayerInfoCache provides an implementation of LayerInfoCache that +// stores results in memory. +func NewInMemoryLayerInfoCache() LayerInfoCache { + return &base{&inmemoryLayerInfoCache{ + membership: make(map[string]map[digest.Digest]struct{}), + meta: make(map[digest.Digest]LayerMeta), + }} +} + +func (ilic *inmemoryLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { + members, ok := ilic.membership[repo] + if !ok { + return false, nil + } + + _, ok = members[dgst] + return ok, nil +} + +// Add adds the layer to the in-memory repository membership set.
+func (ilic *inmemoryLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { + members, ok := ilic.membership[repo] + if !ok { + members = make(map[digest.Digest]struct{}) + ilic.membership[repo] = members + } + + members[dgst] = struct{}{} + + return nil +} + +// Meta retrieves the layer meta data from the in-memory map, returning +// ErrNotFound if not found. +func (ilic *inmemoryLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { + meta, ok := ilic.meta[dgst] + if !ok { + return LayerMeta{}, ErrNotFound + } + + return meta, nil +} + +// SetMeta sets the meta data for the given digest in the in-memory map. +func (ilic *inmemoryLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { + ilic.meta[dgst] = meta + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory_test.go new file mode 100644 index 000000000000..417e982e2b4b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory_test.go @@ -0,0 +1,9 @@ +package cache + +import "testing" + +// TestInMemoryLayerInfoCache checks the in memory implementation is working +// correctly. +func TestInMemoryLayerInfoCache(t *testing.T) { + checkLayerInfoCache(t, NewInMemoryLayerInfoCache()) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go new file mode 100644 index 000000000000..6b8f7679abe7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go @@ -0,0 +1,98 @@ +package cache + +import ( + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/garyburd/redigo/redis" + "golang.org/x/net/context" +) + +// redisLayerInfoCache provides an implementation of storage.LayerInfoCache +// based on redis. Layer info is stored in two parts. The first provides fast +// access to repository membership through a redis set for each repo. The +// second is a redis hash keyed by the digest of the layer, providing path and +// length information. Note that there is no implied relationship between +// these two caches. The layer may exist in one, both, or neither, and the +// code must be written to handle each case. +type redisLayerInfoCache struct { + pool *redis.Pool + + // TODO(stevvooe): We use a pool because we don't have great control over + // the cache lifecycle to manage connections. A new connection is fetched + // for each operation. Once we have better lifecycle management of the + // request objects, we can change this to a connection. +} + +// NewRedisLayerInfoCache returns a new redis-based LayerInfoCache using the +// provided redis connection pool. +func NewRedisLayerInfoCache(pool *redis.Pool) LayerInfoCache { + return &base{&redisLayerInfoCache{ + pool: pool, + }} +} + +// Contains does a membership check on the repository blob set in redis. This +// is used as an access check before looking up global path information. If +// false is returned, the caller should still check the backend to see if it +// exists elsewhere.
+func (rlic *redisLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { + conn := rlic.pool.Get() + defer conn.Close() + + ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Contains(%q, %q)", repo, dgst) + return redis.Bool(conn.Do("SISMEMBER", rlic.repositoryBlobSetKey(repo), dgst)) +} + +// Add adds the layer to the redis repository blob set. +func (rlic *redisLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { + conn := rlic.pool.Get() + defer conn.Close() + + ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Add(%q, %q)", repo, dgst) + _, err := conn.Do("SADD", rlic.repositoryBlobSetKey(repo), dgst) + return err +} + +// Meta retrieves the layer meta data from the redis hash, returning +// ErrNotFound if not found. +func (rlic *redisLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { + conn := rlic.pool.Get() + defer conn.Close() + + reply, err := redis.Values(conn.Do("HMGET", rlic.blobMetaHashKey(dgst), "path", "length")) + if err != nil { + return LayerMeta{}, err + } + + if len(reply) < 2 || reply[0] == nil || reply[1] == nil { + return LayerMeta{}, ErrNotFound + } + + var meta LayerMeta + if _, err := redis.Scan(reply, &meta.Path, &meta.Length); err != nil { + return LayerMeta{}, err + } + + return meta, nil +} + +// SetMeta sets the meta data for the given digest using a redis hash. A hash +// is used here since we may store unrelated fields about a layer in the +// future. +func (rlic *redisLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { + conn := rlic.pool.Get() + defer conn.Close() + + _, err := conn.Do("HMSET", rlic.blobMetaHashKey(dgst), "path", meta.Path, "length", meta.Length) + return err +} + +// repositoryBlobSetKey returns the key for the blob set in the cache. +func (rlic *redisLayerInfoCache) repositoryBlobSetKey(repo string) string { + return "repository::" + repo + "::blobs" +} + +// blobMetaHashKey returns the cache key for immutable blob meta data. +func (rlic *redisLayerInfoCache) blobMetaHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis_test.go new file mode 100644 index 000000000000..7422a7ebb0ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis_test.go @@ -0,0 +1,50 @@ +package cache + +import ( + "flag" + "os" + "testing" + "time" + + "github.com/garyburd/redigo/redis" +) + +var redisAddr string + +func init() { + flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis") +} + +// TestRedisLayerInfoCache exercises a live redis instance using the cache +// implementation.
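Constructing the redis-backed cache only requires a redigo pool, as configureRedis in the handlers package and the test fixture below both demonstrate. A minimal sketch within this package (the address is a placeholder):

func newExampleRedisCache() LayerInfoCache {
	pool := &redis.Pool{
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", "localhost:6379") // placeholder address
		},
		MaxIdle: 1,
		Wait:    false, // proceed without the cache when no connection is available
	}

	// Entries land under "repository::<repo>::blobs" sets and
	// "blobs::<digest>" hashes, per the key helpers above.
	return NewRedisLayerInfoCache(pool)
}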
+func TestRedisLayerInfoCache(t *testing.T) { + if redisAddr == "" { + // fallback to an environment variable + redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") + } + + if redisAddr == "" { + // skip if still not set + t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis") + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + return redis.Dial("tcp", redisAddr) + }, + MaxIdle: 1, + MaxActive: 2, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not available, proceed without cache. + } + + // Clear the database + if _, err := pool.Get().Do("FLUSHDB"); err != nil { + t.Fatalf("unexpected error flushing redis db: %v", err) + } + + checkLayerInfoCache(t, NewRedisLayerInfoCache(pool)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go index 6ccbff40b013..1473f5230536 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -15,7 +15,7 @@ import ( "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) const driverName = "azure" @@ -24,6 +24,7 @@ const ( paramAccountName = "accountname" paramAccountKey = "accountkey" paramContainer = "container" + paramRealm = "realm" ) type driver struct { @@ -64,12 +65,17 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("No %s parameter provided", paramContainer) } - return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container)) + realm, ok := parameters[paramRealm] + if !ok || fmt.Sprint(realm) == "" { + realm = azure.DefaultBaseUrl + } + + return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) } // New constructs a new Driver with the given Azure Storage Account credentials -func New(accountName, accountKey, container string) (*Driver, error) { - api, err := azure.NewBasicClient(accountName, accountKey) +func New(accountName, accountKey, container, realm string) (*Driver, error) { + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultApiVersion, true) if err != nil { return nil, err } @@ -343,5 +349,5 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { func is404(err error) bool { e, ok := err.(azure.StorageServiceError) - return ok && e.StatusCode == 404 + return ok && e.StatusCode == http.StatusNotFound } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go index a8fdf3e90093..4990ba19b284 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go @@ -15,6 +15,7 @@ const ( envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" envContainer = "AZURE_STORAGE_CONTAINER" + envRealm = "AZURE_STORAGE_REALM" ) // Hook up gocheck
into the "go test" runner. @@ -25,6 +26,7 @@ func init() { accountName string accountKey string container string + realm string ) config := []struct { @@ -34,6 +36,7 @@ func init() { {envAccountName, &accountName}, {envAccountKey, &accountKey}, {envContainer, &container}, + {envRealm, &realm}, } missing := []string{} @@ -45,7 +48,7 @@ func init() { } azureDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(accountName, accountKey, container) + return New(accountName, accountKey, container, realm) } // Skip Azure storage driver tests if environment variable parameters are not provided @@ -61,5 +64,6 @@ func init() { // paramAccountName: accountName, // paramAccountKey: accountKey, // paramContainer: container, + // paramRealm: realm, // }, skipCheck) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go index d868453f1620..10b2bf216e06 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) // azureBlockStorage is adaptor between azure.BlobStorageClient and diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go index f1e390277768..c29b4742c4d9 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go @@ -6,7 +6,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) type StorageSimulator struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go index 61f41ebcf5bd..f6bda6a86d7e 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go @@ -7,7 +7,7 @@ import ( "sync" "time" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) type blockIDGenerator struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go index 46d52a342489..6569e15d7372 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go @@ -4,7 +4,7 @@ import ( "math" "testing" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) func Test_blockIdGenerator(t *testing.T) { diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go index c89dd0a34acf..b570d5593a76 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go @@ -5,7 +5,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) // blockStorage is the interface required from a block storage service diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go index 5201e3b49d9e..2c7480dbf98d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) func TestRandomWriter_writeChunkToBlocks(t *testing.T) { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go index 0365ba3cd161..ba7a859d4f17 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go @@ -40,6 +40,7 @@ package base import ( "io" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -51,6 +52,9 @@ type Base struct { // GetContent wraps GetContent of underlying storage driver. func (base *Base) GetContent(path string) ([]byte, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.GetContent") + if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } @@ -60,6 +64,9 @@ func (base *Base) GetContent(path string) ([]byte, error) { // PutContent wraps PutContent of underlying storage driver. func (base *Base) PutContent(path string, content []byte) error { + _, done := context.WithTrace(context.Background()) + defer done("Base.PutContent") + if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } @@ -69,6 +76,9 @@ func (base *Base) PutContent(path string, content []byte) error { // ReadStream wraps ReadStream of underlying storage driver. func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.ReadStream") + if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -82,6 +92,9 @@ func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream wraps WriteStream of underlying storage driver. 
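Concrete drivers pick these wrappers up by embedding Base. A hedged sketch of a hypothetical driver package, assuming Base embeds storagedriver.StorageDriver as the s3 driver's d.StorageDriver assertion later in this patch implies:

package exampledriver // hypothetical package

import (
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/base"
)

// Driver exposes the inner implementation through base.Base, so every call
// is path-validated and traced by the wrappers in the base package.
type Driver struct {
	base.Base
}

// New wraps a hypothetical inner storagedriver.StorageDriver.
func New(inner storagedriver.StorageDriver) *Driver {
	return &Driver{Base: base.Base{StorageDriver: inner}}
}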
func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.WriteStream") + if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -95,6 +108,9 @@ func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn i // Stat wraps Stat of underlying storage driver. func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.Stat") + if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } @@ -104,6 +120,9 @@ func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { // List wraps List of underlying storage driver. func (base *Base) List(path string) ([]string, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.List") + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return nil, storagedriver.InvalidPathError{Path: path} } @@ -113,6 +132,9 @@ func (base *Base) List(path string) ([]string, error) { // Move wraps Move of underlying storage driver. func (base *Base) Move(sourcePath string, destPath string) error { + _, done := context.WithTrace(context.Background()) + defer done("Base.Move") + if !storagedriver.PathRegexp.MatchString(sourcePath) { return storagedriver.InvalidPathError{Path: sourcePath} } else if !storagedriver.PathRegexp.MatchString(destPath) { @@ -124,6 +146,9 @@ func (base *Base) Move(sourcePath string, destPath string) error { // Delete wraps Delete of underlying storage driver. func (base *Base) Delete(path string) error { + _, done := context.WithTrace(context.Background()) + defer done("Base.Delete") + if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } @@ -133,6 +158,9 @@ func (base *Base) Delete(path string) error { // URLFor wraps URLFor of underlying storage driver. func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.URLFor") + if !storagedriver.PathRegexp.MatchString(path) { return "", storagedriver.InvalidPathError{Path: path} } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go index 2bf859bc016c..cdefacfd8cff 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go @@ -212,12 +212,17 @@ func (d *dir) move(src, dst string) error { return errNotExists } - s, ok := sp.(*dir).children[srcFilename] + spd, ok := sp.(*dir) + if !ok { + return errIsNotDir // paranoid.
+ } + + s, ok := spd.children[srcFilename] if !ok { return errNotExists } - delete(sp.(*dir).children, srcFilename) + delete(spd.children, srcFilename) switch n := s.(type) { case *dir: diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go index 2d1553122d9f..aee068a5e629 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go @@ -8,10 +8,10 @@ import ( "encoding/pem" "fmt" "io/ioutil" - "net/url" "time" "github.com/AdRoll/goamz/cloudfront" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" ) @@ -90,23 +90,23 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil } +// S3BucketKeyer is any type that is capable of returning the S3 bucket key +// which should be cached by AWS CloudFront. +type S3BucketKeyer interface { + S3BucketKey(path string) string +} + // URLFor returns a CloudFront signed URL for the given path, falling back // to the underlying storage driver when it cannot derive an S3 bucket key. func (lh *cloudFrontStorageMiddleware) URLFor(path string, options map[string]interface{}) (string, error) { // TODO(endophage): currently only supports S3 - options["expiry"] = time.Now().Add(lh.duration) - - layerURLStr, err := lh.StorageDriver.URLFor(path, options) - if err != nil { - return "", err - } - - layerURL, err := url.Parse(layerURLStr) - if err != nil { - return "", err + keyer, ok := lh.StorageDriver.(S3BucketKeyer) + if !ok { + context.GetLogger(context.Background()).Warn("the CloudFront middleware does not support this backend storage driver") + return lh.StorageDriver.URLFor(path, options) } - cfURL, err := lh.cloudfront.CannedSignedURL(layerURL.Path, "", time.Now().Add(lh.duration)) + cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) if err != nil { return "", err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go index d240c9018e2e..402f2eaacec6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go @@ -695,6 +695,11 @@ func (d *driver) s3Path(path string) string { return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") } +// S3BucketKey returns the s3 bucket key for the given storage driver path.
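On the consuming side, the middleware above relies on a plain interface assertion. A short sketch of that check, restating the S3BucketKeyer contract locally (driver and path values are illustrative):

func cloudFrontKeyFor(sd storagedriver.StorageDriver, path string) (string, bool) {
	// Mirrors the cloudfront middleware: fall back when the backend
	// cannot map a storage path to an S3 bucket key.
	type s3BucketKeyer interface {
		S3BucketKey(path string) string
	}

	keyer, ok := sd.(s3BucketKeyer)
	if !ok {
		return "", false
	}
	return keyer.S3BucketKey(path), true
}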
+func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + func parseError(path string, err error) error { if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { return storagedriver.PathNotFoundError{Path: path} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go index f0fe7feff880..442dc2575a62 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go @@ -83,7 +83,7 @@ type StorageDriver interface { // number of path components separated by slashes, where each component is // restricted to alphanumeric characters or a period, underscore, or // hyphen. -var PathRegexp = regexp.MustCompile(`^(/[a-z0-9._-]+)+$`) +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) // ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. var ErrUnsupportedMethod = errors.New("unsupported method") diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go index cfa3a48a4228..74ddab6f865f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go @@ -15,7 +15,6 @@ import ( "time" storagedriver "github.com/docker/distribution/registry/storage/driver" - "gopkg.in/check.v1" ) @@ -137,7 +136,9 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) { "/.abc", "/a--b", "/a-.b", - "/_.abc"} + "/_.abc", + "/Docker/docker-registry", + "/Abc/Cba"} for _, filename := range validFiles { err := suite.StorageDriver.PutContent(filename, contents) @@ -160,8 +161,7 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) { "abc", "123.abc", "//bcd", - "/abc_123/", - "/Docker/docker-registry"} + "/abc_123/"} for _, filename := range invalidFiles { err := suite.StorageDriver.PutContent(filename, contents) @@ -591,6 +591,20 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { c.Assert(received, check.DeepEquals, contents) } +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid(c *check.C) { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent("/notadir", contents) + c.Assert(err, check.IsNil) + defer suite.StorageDriver.Delete("/notadir") + + // Now try to move a non-existent file under it.
+ err = suite.StorageDriver.Move("/notadir/foo", "/notadir/bar") + c.Assert(err, check.NotNil) // non-nil error +} + // TestDelete checks that the delete operation removes data from the storage // driver func (suite *DriverSuite) TestDelete(c *check.C) { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go index b70b1fb2048c..65d4347fafd1 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go @@ -27,8 +27,8 @@ type fileReader struct { // identifying fields path string - size int64 // size is the total layer size, must be set. - modtime time.Time + size int64 // size is the total size, must be set. + modtime time.Time // TODO(stevvooe): This is not needed anymore. // mutable fields rc io.ReadCloser // remote read closer diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go index 43e028d56289..e225d0685201 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -35,7 +36,7 @@ func TestSimpleLayerUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -143,7 +144,7 @@ func TestSimpleLayerRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -180,7 +181,7 @@ func TestSimpleLayerRead(t *testing.T) { t.Fatalf("unexpected error fetching non-existent layer: %v", err) } - randomLayerDigest, err := writeTestLayer(driver, ls.(*layerStore).repository.pm, imageName, dgst, randomLayerReader) + randomLayerDigest, err := writeTestLayer(driver, defaultPathMapper, imageName, dgst, randomLayerReader) if err != nil { t.Fatalf("unexpected error writing test layer: %v", err) } @@ -252,7 +253,7 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go new file mode 100644 index 000000000000..b9732f203eb5 --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go @@ -0,0 +1,202 @@ +package storage + +import ( + "expvar" + "sync/atomic" + "time" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/driver" + "golang.org/x/net/context" +) + +// cachedLayerService implements the layer service with path-aware caching, +// using a LayerInfoCache interface. +type cachedLayerService struct { + distribution.LayerService // upstream layer service + repository distribution.Repository + ctx context.Context + driver driver.StorageDriver + *blobStore // global blob store + cache cache.LayerInfoCache +} + +// Exists checks for existence of the digest in the cache, immediately +// returning if it exists for the repository. If not, the upstream is checked. +// When a positive result is found, it is written into the cache. +func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) { + ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst) + now := time.Now() + defer func() { + // TODO(stevvooe): Replace this with a decent context-based metrics solution + ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)). + Infof("(*cachedLayerService).Exists(%q)", dgst) + }() + + atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1) + available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) + if err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) + goto fallback + } + + if available { + atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1) + return true, nil + } + +fallback: + atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1) + exists, err := lc.LayerService.Exists(dgst) + if err != nil { + return exists, err + } + + if exists { + // we can only cache this if the existence is positive. + if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err) + } + } + + return exists, err +} + +// Fetch checks for the availability of the layer in the repository via the +// cache. If present, the metadata is resolved and the layer is returned. If +// any operation fails, the layer is read directly from the upstream. The +// results are cached, if possible. +func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) { + ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Fetch(%q)", dgst) + now := time.Now() + defer func() { + ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)). 
+ Infof("(*layerInfoCache).Fetch(%q)", dgst) + }() + + atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1) + available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) + if err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) + goto fallback + } + + if available { + // fast path: get the layer info and return + meta, err := lc.cache.Meta(lc.ctx, dgst) + if err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err) + goto fallback + } + + atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1) + return newLayerReader(lc.driver, dgst, meta.Path, meta.Length) + } + + // NOTE(stevvooe): Unfortunately, the cache here only makes checks for + // existing layers faster. We'd have to provide more careful + // synchronization with the backend to make the missing case as fast. + +fallback: + atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1) + layer, err := lc.LayerService.Fetch(dgst) + if err != nil { + return nil, err + } + + // add the layer to the repository + if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { + ctxu.GetLogger(lc.ctx). + Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err) + } + + // lookup layer path and add it to the cache, if it succeds. Note that we + // still return the layer even if we have trouble caching it. + if path, err := lc.resolveLayerPath(layer); err != nil { + ctxu.GetLogger(lc.ctx). + Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err) + } else { + // add the layer to the cache once we've resolved the path. + if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err) + } + } + + return layer, err +} + +// extractLayerInfo pulls the layerInfo from the layer, attempting to get the +// path information from either the concrete object or by resolving the +// primary blob store path. +func (lc *cachedLayerService) resolveLayerPath(layer distribution.Layer) (path string, err error) { + // try and resolve the type and driver, so we don't have to traverse links + switch v := layer.(type) { + case *layerReader: + // only set path if we have same driver instance. + if v.driver == lc.driver { + return v.path, nil + } + } + + ctxu.GetLogger(lc.ctx).Warnf("resolving layer path during cache lookup (%v@%v)", lc.repository.Name(), layer.Digest()) + // we have to do an expensive stat to resolve the layer location but no + // need to check the link, since we already have layer instance for this + // repository. + bp, err := lc.blobStore.path(layer.Digest()) + if err != nil { + return "", err + } + + return bp, nil +} + +// layerInfoCacheMetrics keeps track of cache metrics for layer info cache +// requests. Note this is kept globally and made available via expvar. For +// more detailed metrics, its recommend to instrument a particular cache +// implementation. +var layerInfoCacheMetrics struct { + // Exists tracks calls to the Exists caches. + Exists struct { + Requests uint64 + Hits uint64 + Misses uint64 + } + + // Fetch tracks calls to the fetch caches. 
+ Fetch struct { + Requests uint64 + Hits uint64 + Misses uint64 + } +} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("layerinfo", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly. + return layerInfoCacheMetrics + })) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go index 414951d9a560..40deba6a70e6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go @@ -17,6 +17,21 @@ type layerReader struct { digest digest.Digest } +// newLayerReader returns a new layerReader with the digest, path and length, +// eliding round trips to the storage backend. +func newLayerReader(driver driver.StorageDriver, dgst digest.Digest, path string, length int64) (*layerReader, error) { + fr := &fileReader{ + driver: driver, + path: path, + size: length, + } + + return &layerReader{ + fileReader: *fr, + digest: dgst, + }, nil +} + var _ distribution.Layer = &layerReader{} func (lr *layerReader) Digest() digest.Digest { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go index 05881749e2c8..1c7428a9f37a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go @@ -138,12 +138,16 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di return nil, err } - return &layerWriter{ + lw := &layerWriter{ layerStore: ls, uuid: uuid, startedAt: startedAt, bufferedFileWriter: *fw, - }, nil + } + + lw.setupResumableDigester() + + return lw, nil } func (ls *layerStore) path(dgst digest.Digest) (string, error) { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go index 27bbade126f8..1e5ea9187ffd 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go @@ -3,7 +3,9 @@ package storage import ( "fmt" "io" + "os" "path" + "strconv" "time" "github.com/Sirupsen/logrus" @@ -20,10 +22,11 @@ var _ distribution.LayerUpload = &layerWriter{} type layerWriter struct { layerStore *layerStore - uuid string - startedAt time.Time + uuid string + startedAt time.Time + resumableDigester digest.ResumableDigester - // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisy + // implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy // LayerUpload Interface bufferedFileWriter } @@ -83,37 +86,244 @@
func (lw *layerWriter) Cancel() error { return nil } -// validateLayer checks the layer data against the digest, returning an error -// if it does not match. The canonical digest is returned. -func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { - digestVerifier, err := digest.NewDigestVerifier(dgst) +func (lw *layerWriter) Write(p []byte) (int, error) { + if lw.resumableDigester == nil { + return lw.bufferedFileWriter.Write(p) + } + + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := lw.resumeHashAt(lw.offset); err != nil { + return 0, err + } + + return io.MultiWriter(&lw.bufferedFileWriter, lw.resumableDigester).Write(p) +} + +func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) { + if lw.resumableDigester == nil { + return lw.bufferedFileWriter.ReadFrom(r) + } + + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := lw.resumeHashAt(lw.offset); err != nil { + return 0, err + } + + return lw.bufferedFileWriter.ReadFrom(io.TeeReader(r, lw.resumableDigester)) +} + +func (lw *layerWriter) Close() error { + if lw.err != nil { + return lw.err + } + + if lw.resumableDigester != nil { + if err := lw.storeHashState(); err != nil { + return err + } + } + + return lw.bufferedFileWriter.Close() +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{ + name: lw.layerStore.repository.Name(), + uuid: lw.uuid, + alg: lw.resumableDigester.Digest().Algorithm(), + list: true, + }) + if err != nil { + return nil, err + } + + paths, err := lw.driver.List(uploadHashStatePathPrefix) if err != nil { - return "", err + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +// resumeHashAt attempts to restore the state of the internal hash function +// by loading the most recent saved hash state less than or equal to the given +// offset. Any unhashed bytes remaining less than the given offset are hashed +// from the content uploaded so far. +func (lw *layerWriter) resumeHashAt(offset int64) error { + if offset < 0 { + return fmt.Errorf("cannot resume hash at negative offset: %d", offset) } - // TODO(stevvooe): Store resumable hash calculations in upload directory - // in driver. Something like a file at path /resumablehash/ - // with the hash state up to that point would be perfect. The hasher would - // then only have to fetch the difference. + if offset == int64(lw.resumableDigester.Len()) { + // State of digester is already at the requested offset.
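As an aside, the Write and ReadFrom methods above keep the running digest in lockstep with the stored bytes: writes are mirrored into the digester with io.MultiWriter, and reads from the client are split with io.TeeReader. The same stdlib pattern in isolation:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	h := sha256.New()

	// Write path: everything written to w also reaches the hash.
	w := io.MultiWriter(os.Stdout, h)
	fmt.Fprintln(w, "mirrored write")

	// Read path: everything read from r also reaches the hash.
	r := io.TeeReader(strings.NewReader("mirrored read\n"), h)
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}

	fmt.Printf("digest over both streams: %x\n", h.Sum(nil))
}
```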
+ return nil + } - + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := lw.getStoredHashStates() - // Read the file from the backend driver and validate it. - fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path) if err != nil { - return "", err + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset less than or equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { + // This offset is closer to the requested offset. + hashStateMatch = hashState + } else if hashState.offset > offset { + // Remove any stored hash state with offsets higher than this one + // as writes to this resumed hasher will make those invalid. This + // is probably okay to skip for now since we don't expect anyone to + // use the API in this way. For that reason, we don't treat an + // error here as a fatal error, but only log it. + if err := lw.driver.Delete(hashState.path); err != nil { + logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) + } + } } - tr := io.TeeReader(fr, digestVerifier) + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + lw.resumableDigester.Reset() + } else { + storedState, err := lw.driver.GetContent(hashStateMatch.path) + if err != nil { + return err + } - // TODO(stevvooe): This is one of the places we need a Digester write - // sink. Instead, its read driven. This might be okay. + if err = lw.resumableDigester.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. + if gapLen := offset - int64(lw.resumableDigester.Len()); gapLen > 0 { + // Need to read content from the upload to catch up to the desired + // offset. + fr, err := newFileReader(lw.driver, lw.path) + if err != nil { + return err + } + + if _, err = fr.Seek(int64(lw.resumableDigester.Len()), os.SEEK_SET); err != nil { + return fmt.Errorf("unable to seek to layer reader offset %d: %s", lw.resumableDigester.Len(), err) + } + + if _, err := io.CopyN(lw.resumableDigester, fr, gapLen); err != nil { + return err + } + } + + return nil +} + +func (lw *layerWriter) storeHashState() error { + uploadHashStatePath, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{ + name: lw.layerStore.repository.Name(), + uuid: lw.uuid, + alg: lw.resumableDigester.Digest().Algorithm(), + offset: int64(lw.resumableDigester.Len()), + }) + if err != nil { + return err + } - // Calculate an updated digest with the latest version. - canonical, err := digest.FromReader(tr) + hashState, err := lw.resumableDigester.State() if err != nil { - return "", err + return err + } + + return lw.driver.PutContent(uploadHashStatePath, hashState) +} + +// validateLayer checks the layer data against the digest, returning an error +// if it does not match. The canonical digest is returned. +func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { + var ( + verified, fullHash bool + canonical digest.Digest + ) + + if lw.resumableDigester != nil { + // Restore the hasher state to the end of the upload.
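resumeHashAt and storeHashState persist the digester's serialized state at known byte offsets so an interrupted upload can resume hashing without re-reading the whole file; the code uses a digest.ResumableDigester for this because Go 1.4's standard crypto hashes could not export their internal state. On Go 1.8 or newer the standard hashes implement encoding.BinaryMarshaler, so the idea can be sketched without that abstraction (a toy, not the vendored implementation):

```go
package main

import (
	"crypto/sha256"
	"encoding"
	"fmt"
)

func main() {
	payload := []byte("hello, resumable world")

	// Hash a prefix, then snapshot the hasher, as storeHashState does at an
	// upload offset.
	h := sha256.New()
	h.Write(payload[:10])
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary() // Go 1.8+
	if err != nil {
		panic(err)
	}

	// Later, restore the snapshot and hash only the remainder, as
	// resumeHashAt does instead of re-reading bytes 0..9.
	h2 := sha256.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h2.Write(payload[10:])

	direct := sha256.Sum256(payload)
	fmt.Printf("resumed: %x\ndirect:  %x\n", h2.Sum(nil), direct[:])
}
```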
+ if err := lw.resumeHashAt(lw.size); err != nil { + return "", err + } + + canonical = lw.resumableDigester.Digest() + + if canonical.Algorithm() == dgst.Algorithm() { + // Common case: client and server prefer the same canonical digest + // algorithm - currently SHA256. + verified = dgst == canonical + } else { + // The client wants to use a different digest algorithm. They'll just + // have to be patient and wait for us to download and re-hash the + // uploaded content using that digest algorithm. + fullHash = true + } + } else { + // Not using resumable digests, so we need to hash the entire layer. + fullHash = true + } + + if fullHash { + digester := digest.NewCanonicalDigester() + + digestVerifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return "", err + } + + // Read the file from the backend driver and validate it. + fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path) + if err != nil { + return "", err + } + + tr := io.TeeReader(fr, digester) + + if _, err = io.Copy(digestVerifier, tr); err != nil { + return "", err + } + + canonical = digester.Digest() + verified = digestVerifier.Verified() } - if !digestVerifier.Verified() { + if !verified { return "", distribution.ErrLayerInvalidDigest{ Digest: dgst, Reason: fmt.Errorf("content does not match digest"), diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_nonresumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_nonresumable.go new file mode 100644 index 000000000000..d4350c6b843a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_nonresumable.go @@ -0,0 +1,6 @@ +// +build noresumabledigest + +package storage + +func (lw *layerWriter) setupResumableDigester() { +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_resumable.go new file mode 100644 index 000000000000..7d8c63354cf2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_resumable.go @@ -0,0 +1,9 @@ +// +build !noresumabledigest + +package storage + +import "github.com/docker/distribution/digest" + +func (lw *layerWriter) setupResumableDigester() { + lw.resumableDigester = digest.NewCanonicalResumableDigester() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go index 664df0fadcbc..1026c8aee050 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -6,6 +6,8 @@ import ( "reflect" "testing" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" @@ -19,7 +21,7 @@ import ( type manifestStoreTestEnv struct { ctx context.Context driver driver.StorageDriver - registry distribution.Registry + registry distribution.Namespace repository distribution.Repository name string tag string @@ -28,7 +30,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + 
registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repo, err := registry.Repository(ctx, name) if err != nil { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go index 179e7b783012..7aeff6e440eb 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go @@ -33,6 +33,7 @@ const storagePathVersion = "v2" // -> _uploads/<uuid> // data // startedat +// hashstates/<algorithm>/<offset> // -> blob/<algorithm> // <split directory content addressable storage> // @@ -87,6 +88,7 @@ const storagePathVersion = "v2" // // uploadDataPathSpec: /v2/repositories/<name>/_uploads/<uuid>/data // uploadStartedAtPathSpec: /v2/repositories/<name>/_uploads/<uuid>/startedat +// uploadHashStatePathSpec: /v2/repositories/<name>/_uploads/<uuid>/hashstates/<algorithm>/<offset> // // Blob Store: // @@ -249,6 +251,14 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "data")...), nil case uploadStartedAtPathSpec: return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. + } + return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "hashstates", v.alg, offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil default: // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). return "", fmt.Errorf("unknown path spec: %#v", v) @@ -424,6 +434,26 @@ type uploadStartedAtPathSpec struct { func (uploadStartedAtPathSpec) pathSpec() {} +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, uuid, and alg. +type uploadHashStatePathSpec struct { + name string + uuid string + alg string + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec returns the root of repositories +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {} + // digestPathComponents provides a consistent path breakdown for a given // digest.
For a generic digest, it will be as follows: // diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go new file mode 100644 index 000000000000..13c468dedfe3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -0,0 +1,136 @@ +package storage + +import ( + "path" + "strings" + "time" + + "code.google.com/p/go-uuid/uuid" + log "github.com/Sirupsen/logrus" + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// uploadData stores the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. The list of files deleted and errors +// encountered are returned. +func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +} + +// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. The only reliable way to +// classify the age of a file is with the date stored in the startedat +// file, so gather files by UUID with a date from startedAt.
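Before the traversal helper below, a usage sketch of the exported entry point defined above: PurgeUploads takes any storage driver, a cutoff time, and a dry-run flag. Import paths follow the vendored layout; note that on an empty driver the walk itself may report an error rather than finding uploads:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	d := inmemory.New()
	cutoff := time.Now().Add(-24 * time.Hour)

	// Dry run: actuallyDelete=false only reports what would be removed.
	candidates, errs := storage.PurgeUploads(d, cutoff, false)
	fmt.Println("candidates:", candidates, "errors:", len(errs))

	// Real run: removes upload directories whose startedat predates cutoff.
	deleted, errs := storage.PurgeUploads(d, cutoff, true)
	fmt.Println("deleted:", deleted, "errors:", len(errs))
}
```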
+func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData, 0) + + inUploadDir := false + root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + err = Walk(driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return ErrSkipDir + } + + } + + uuid, isContainingDir := uUIDFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uUIDFromPath extracts the upload UUID from a given path +// If the UUID is the last path component, this is the containing +// directory for all upload files +func uUIDFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if uuid := uuid.Parse(components[i]); uuid != nil { + return uuid.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedat file +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + startedAtBytes, err := driver.GetContent(path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go new file mode 100644 index 000000000000..368e7c86da4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go @@ -0,0 +1,165 @@ +package storage + +import ( + "path" + "strings" + "testing" + "time" + + "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +var pm = defaultPathMapper + +func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) driver.StorageDriver { + d := inmemory.New() + for i := 0; i < numUploads; i++ { + addUploads(t, d, uuid.New(), repoName, startedAt) + } + return d +} + +func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { + dataPath, err := pm.path(uploadDataPathSpec{name: repo, uuid: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + if err := d.PutContent(dataPath, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, uuid: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + + if err := d.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + t.Fatalf("Unable to write startedAt file") + } + +} + +func TestPurgeGather(t *testing.T) {
uploadCount := 5 + fs := testUploadFS(t, uploadCount, "test-repo", time.Now()) + uploadData, errs := getOutstandingUploads(fs) + if len(errs) != 0 { + t.Errorf("Unexpected errors: %q", errs) + } + if len(uploadData) != uploadCount { + t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData)) + } +} + +func TestPurgeNone(t *testing.T) { + fs := testUploadFS(t, 10, "test-repo", time.Now()) + oneHourAgo := time.Now().Add(-1 * time.Hour) + deleted, errs := PurgeUploads(fs, oneHourAgo, true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + if len(deleted) != 0 { + t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) + } +} + +func TestPurgeAll(t *testing.T) { + uploadCount := 10 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) + + // Ensure uploads in more than one repo are purged + addUploads(t, fs, uuid.New(), "test-repo2", oneHourAgo) + uploadCount++ + + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + fileCount := uploadCount + if len(deleted) != fileCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), fileCount) + } +} + +func TestPurgeSome(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) + + newUploadCount := 4 + + for i := 0; i < newUploadCount; i++ { + addUploads(t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour)) + } + + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + if len(deleted) != oldUploadCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), oldUploadCount) + } +} + +func TestPurgeOnlyUploads(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) + + // Create a directory tree outside _uploads and ensure + // these files aren't deleted.
+ dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", uuid: uuid.New()}) + if err != nil { + t.Fatalf(err.Error()) + } + nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) + if strings.Index(nonUploadPath, "_upload") != -1 { + t.Fatalf("Non-upload path not created correctly") + } + + nonUploadFile := path.Join(nonUploadPath, "file") + if err = fs.PutContent(nonUploadFile, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + for _, file := range deleted { + if strings.Index(file, "_upload") == -1 { + t.Errorf("Non-upload file deleted") + } + } +} + +func TestPurgeMissingStartedAt(t *testing.T) { + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, 1, "test-repo", oneHourAgo) + err := Walk(fs, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + + if file == "startedat" { + if err := fs.Delete(filePath); err != nil { + t.Fatalf("Unable to delete startedat file: %s", filePath) + } + } + return nil + }) + if err != nil { + t.Fatalf("Unexpected error during Walk: %s ", err.Error()) + } + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) > 0 { + t.Errorf("Unexpected errors") + } + if len(deleted) > 0 { + t.Errorf("Files unexpectedly deleted: %s", deleted) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go index 8d7ea16ecb00..1126db457200 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go @@ -3,6 +3,7 @@ package storage import ( "github.com/docker/distribution" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" "golang.org/x/net/context" ) @@ -10,28 +11,35 @@ import ( // registry is the top-level implementation of Registry for use in the storage // package. All instances should descend from this object. type registry struct { - driver storagedriver.StorageDriver - pm *pathMapper - blobStore *blobStore + driver storagedriver.StorageDriver + pm *pathMapper + blobStore *blobStore + layerInfoCache cache.LayerInfoCache } // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is // cheap to allocate. -func NewRegistryWithDriver(driver storagedriver.StorageDriver) distribution.Registry { - bs := &blobStore{} +func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace { + bs := &blobStore{ + driver: driver, + pm: defaultPathMapper, + } - reg := ®istry{ + return ®istry{ driver: driver, blobStore: bs, // TODO(sday): This should be configurable. - pm: defaultPathMapper, + pm: defaultPathMapper, + layerInfoCache: layerInfoCache, } +} - reg.blobStore.registry = reg - - return reg +// Scope returns the namespace scope for a registry. The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope } // Repository returns an instance of the repository tied to the registry. 
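Every test updated earlier in this patch reflects the same signature change: registry construction now takes an explicit layer-info cache and returns a distribution.Namespace. A minimal wiring sketch mirroring those tests (in-memory driver plus the in-memory cache; API names are taken from the hunks above):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	"golang.org/x/net/context"
)

func main() {
	// The new signature: driver plus LayerInfoCache, returning a Namespace.
	ns := storage.NewRegistryWithDriver(inmemory.New(), cache.NewInMemoryLayerInfoCache())

	repo, err := ns.Repository(context.Background(), "foo/bar")
	if err != nil {
		panic(err)
	}

	// With a cache supplied, Layers() is transparently wrapped in the
	// cachedLayerService defined earlier in this patch.
	fmt.Printf("repo %q, layer service %T\n", repo.Name(), repo.Layers())
}
```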
@@ -83,9 +91,29 @@ func (repo *repository) Manifests() distribution.ManifestService { // may be context sensitive in the future. The instance should be used similar // to a request local. func (repo *repository) Layers() distribution.LayerService { - return &layerStore{ + ls := &layerStore{ repository: repo, } + + if repo.registry.layerInfoCache != nil { + // TODO(stevvooe): This is not the best place to set up a cache. We would + // really like to decouple the cache from the backend but also have the + // manifest service use the layer service cache. For now, we can simply + // integrate the cache directly. The main issue is that we have layer + // access and layer data coupled in a single object. Work is already under + // way to decouple this. + + return &cachedLayerService{ + LayerService: ls, + repository: repo, + ctx: repo.ctx, + driver: repo.driver, + blobStore: repo.blobStore, + cache: repo.registry.layerInfoCache, + } + } + + return ls } func (repo *repository) Signatures() distribution.SignatureService { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go index abc52ca6e5c2..7094b69e274f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go @@ -2,8 +2,10 @@ package storage import ( "path" + "sync" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) @@ -33,21 +35,59 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { return nil, err } - var signatures [][]byte - for _, sigPath := range signaturePaths { + var wg sync.WaitGroup + type result struct { + index int + signature []byte + err error + } + ch := make(chan result) + + for i, sigPath := range signaturePaths { // Append the link portion sigPath = path.Join(sigPath, "link") - // TODO(stevvooe): These fetches should be parallelized for performance. - p, err := s.blobStore.linked(sigPath) - if err != nil { - return nil, err + wg.Add(1) + go func(idx int, sigPath string) { + defer wg.Done() + context.GetLogger(s.ctx). + Debugf("fetching signature from %q", sigPath) + + r := result{index: idx} + if p, err := s.blobStore.linked(sigPath); err != nil { + context.GetLogger(s.ctx). + Errorf("error fetching signature from %q: %v", sigPath, err) + r.err = err + } else { + r.signature = p + } + + ch <- r + }(i, sigPath) + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + // aggregate the results + signatures := make([][]byte, len(signaturePaths)) +loop: + for { + select { + case result := <-ch: + signatures[result.index] = result.signature + if result.err != nil && err == nil { + // only set the first one.
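The rewritten Get above replaces the sequential TODO with a fan-out: one goroutine per signature link, results funneled through a channel and reassembled by index so order is preserved, keeping only the first error. The same pattern in isolation, with illustrative names (the real code drains via a done channel and select; closing the results channel after wg.Wait is an equivalent simplification):

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// fetchAll fans out one goroutine per path and reassembles results by index,
// so output order matches input order; the first error wins, as above.
func fetchAll(paths []string, fetch func(string) (string, error)) ([]string, error) {
	type result struct {
		index int
		value string
		err   error
	}

	var wg sync.WaitGroup
	ch := make(chan result)
	for i, p := range paths {
		wg.Add(1)
		go func(i int, p string) {
			defer wg.Done()
			v, err := fetch(p)
			ch <- result{index: i, value: v, err: err}
		}(i, p)
	}
	// Close the channel once all workers finish so the range below ends.
	go func() { wg.Wait(); close(ch) }()

	out := make([]string, len(paths))
	var firstErr error
	for r := range ch {
		out[r.index] = r.value
		if r.err != nil && firstErr == nil {
			firstErr = r.err
		}
	}
	return out, firstErr
}

func main() {
	got, err := fetchAll([]string{"/a/link", "/b/link"}, func(p string) (string, error) {
		return strings.ToUpper(p), nil // stand-in for blobStore.linked
	})
	fmt.Println(got, err)
}
```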
+ err = result.err + } + case <-done: + break loop } - - signatures = append(signatures, p) } - return signatures, nil + return signatures, err } func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go new file mode 100644 index 000000000000..7b958d879ce3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go @@ -0,0 +1,50 @@ +package storage + +import ( + "errors" + "fmt" + + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// ErrSkipDir is used as a return value from a WalkFn to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk. +// If the returned error is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. Otherwise Walk will return the error. +type WalkFn func(fileInfo storageDriver.FileInfo) error + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(from) + if err != nil { + return err + } + for _, child := range children { + fileInfo, err := driver.Stat(child) + if err != nil { + return err + } + err = f(fileInfo) + skipDir := (err == ErrSkipDir) + if err != nil && !skipDir { + return err + } + + if fileInfo.IsDir() && !skipDir { + Walk(driver, child, f) + } + } + return nil +} + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go new file mode 100644 index 000000000000..22b91b35627b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go @@ -0,0 +1,119 @@ +package storage + +import ( + "fmt" + "testing" + + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func testFS(t *testing.T) (driver.StorageDriver, map[string]string) { + d := inmemory.New() + c := []byte("") + if err := d.PutContent("/a/b/c/d", c); err != nil { + t.Fatalf("Unable to put to inmemory fs") + } + if err := d.PutContent("/a/b/c/e", c); err != nil { + t.Fatalf("Unable to put to inmemory fs") + } + + expected := map[string]string{ + "/a": "dir", + "/a/b": "dir", + "/a/b/c": "dir", + "/a/b/c/d": "file", + "/a/b/c/e": "file", + } + + return d, expected +} + +func TestWalkErrors(t *testing.T) { + d, expected := testFS(t) + fileCount := len(expected) + err := Walk(d, "", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Error("Expected invalid root err") + } + + err = Walk(d, "/", func(fileInfo driver.FileInfo) error { + // error on the 2nd file + if fileInfo.Path() == "/a/b" { + return fmt.Errorf("Early termination") + } + delete(expected, fileInfo.Path()) + return nil + }) + if len(expected) != fileCount-1 { + t.Error("Walk failed to terminate with error") + }
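The Walk helper introduced above gives any storage driver a filesystem-style traversal; returning ErrSkipDir from the callback prunes a directory's subtree, which is what lets the purge code skip everything outside _uploads. A usage sketch against the in-memory driver (paths are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	d := inmemory.New()
	d.PutContent("/a/keep/file", []byte("x"))
	d.PutContent("/a/skip/file", []byte("x"))

	err := storage.Walk(d, "/", func(fi driver.FileInfo) error {
		if fi.IsDir() && fi.Path() == "/a/skip" {
			return storage.ErrSkipDir // prune this subtree
		}
		fmt.Println(fi.Path())
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```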
if err != nil { + t.Error(err.Error()) + } + + err = Walk(d, "/nonexistant", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Errorf("Expected missing file err") + } + +} + +func TestWalk(t *testing.T) { + d, expected := testFS(t) + err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + filetype, ok := expected[filePath] + if !ok { + t.Fatalf("Unexpected file in walk: %q", filePath) + } + + if fileInfo.IsDir() { + if filetype != "dir" { + t.Errorf("Unexpected file type: %q", filePath) + } + } else { + if filetype != "file" { + t.Errorf("Unexpected file type: %q", filePath) + } + } + delete(expected, filePath) + return nil + }) + if len(expected) > 0 { + t.Errorf("Missed files in walk: %q", expected) + } + if err != nil { + t.Fatalf(err.Error()) + } +} + +func TestWalkSkipDir(t *testing.T) { + d, expected := testFS(t) + err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + if filePath == "/a/b" { + // skip processing /a/b/c and /a/b/c/d + return ErrSkipDir + } + delete(expected, filePath) + return nil + }) + if err != nil { + t.Fatalf(err.Error()) + } + if _, ok := expected["/a/b/c"]; !ok { + t.Errorf("/a/b/c not skipped") + } + if _, ok := expected["/a/b/c/d"]; !ok { + t.Errorf("/a/b/c/d not skipped") + } + if _, ok := expected["/a/b/c/e"]; !ok { + t.Errorf("/a/b/c/e not skipped") + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go index 5970ec157a40..3a542f9b6fe7 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go @@ -8,4 +8,4 @@ var Package = "github.com/docker/distribution" // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. -var Version = "v2.0.0-alpha.2+unknown" +var Version = "v2.0.0+unknown" diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go new file mode 100644 index 000000000000..ce78eff6fdd5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go @@ -0,0 +1,45 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package internal + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func LookupCommandInfo(commandName string) CommandInfo { + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go new file mode 100644 index 000000000000..5f955c424483 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go @@ -0,0 +1,65 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redistest contains utilities for writing Redigo tests. +package redistest + +import ( + "errors" + "time" + + "github.com/garyburd/redigo/redis" +) + +type testConn struct { + redis.Conn +} + +func (t testConn) Close() error { + _, err := t.Conn.Do("SELECT", "9") + if err != nil { + return err + } + _, err = t.Conn.Do("FLUSHDB") + if err != nil { + return err + } + return t.Conn.Close() +} + +// Dial dials the local Redis server and selects database 9. To prevent +// stomping on real data, Dial fails if database 9 contains data. The +// returned connection flushes database 9 on close. +func Dial() (redis.Conn, error) { + c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second) + if err != nil { + return nil, err + } + + _, err = c.Do("SELECT", "9") + if err != nil { + return nil, err + } + + n, err := redis.Int(c.Do("DBSIZE")) + if err != nil { + return nil, err + } + + if n != 0 { + return nil, errors.New("database #9 is not empty, test can not continue") + } + + return testConn{c}, nil +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go new file mode 100644 index 000000000000..ac0e971c4ea5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go @@ -0,0 +1,455 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License.
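conn.go below is the heart of the newly vendored redigo client. Before the implementation, a sketch of the API it exposes, as exercised by the tests further down: Dial/DialTimeout to connect, Do for a round trip, and typed reply helpers (redis.Values appears in the tests below; redis.String is its string counterpart). This assumes a reachable Redis server on localhost:6379:

```go
package main

import (
	"fmt"
	"time"

	"github.com/garyburd/redigo/redis"
)

func main() {
	// DialTimeout bounds the connect, each read, and each write separately.
	c, err := redis.DialTimeout("tcp", ":6379", time.Second, time.Second, time.Second)
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Do writes the command, flushes, and reads one reply.
	if _, err := c.Do("SET", "greeting", "hello"); err != nil {
		panic(err)
	}

	// Convert the generic reply to a string.
	v, err := redis.String(c.Do("GET", "greeting"))
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // hello
}
```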
+ +package redis + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "time" +) + +// conn is the low-level implementation of Conn +type conn struct { + + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// Dial connects to the Redis server at the given network and address. +func Dial(network, address string) (Conn, error) { + dialer := xDialer{} + return dialer.Dial(network, address) +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + netDialer := net.Dialer{Timeout: connectTimeout} + dialer := xDialer{ + NetDial: netDialer.Dial, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + } + return dialer.Dial(network, address) +} + +// A Dialer specifies options for connecting to a Redis server. +type xDialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, then net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // ReadTimeout specifies the timeout for reading a single command + // reply. If ReadTimeout is zero, then no timeout is used. + ReadTimeout time.Duration + + // WriteTimeout specifies the timeout for writing a single command. If + // WriteTimeout is zero, then no timeout is used. + WriteTimeout time.Duration +} + +// Dial connects to the Redis server at address on the named network. +func (d *xDialer) Dial(network, address string) (Conn, error) { + dial := d.NetDial + if dial == nil { + dial = net.Dial + } + netConn, err := dial(network, address) + if err != nil { + return nil, err + } + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: d.ReadTimeout, + writeTimeout: d.WriteTimeout, + }, nil +} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. 
+ c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + c.writeLen('$', len(s)) + c.bw.WriteString(s) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + c.writeLen('$', len(p)) + c.bw.Write(p) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { + c.writeLen('*', 1+len(args)) + err = c.writeString(cmd) + for _, arg := range args { + if err != nil { + break + } + switch arg := arg.(type) { + case string: + err = c.writeString(arg) + case []byte: + err = c.writeBytes(arg) + case int: + err = c.writeInt64(int64(arg)) + case int64: + err = c.writeInt64(arg) + case float64: + err = c.writeFloat64(arg) + case bool: + if arg { + err = c.writeString("1") + } else { + err = c.writeString("0") + } + case nil: + err = c.writeString("") + default: + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + err = c.writeBytes(buf.Bytes()) + } + } + return err +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +func (c *conn) readLine() ([]byte, error) { + p, err := c.br.ReadSlice('\n') + if err == bufio.ErrBufferFull { + return nil, protocolError("long response line") + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. +func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and *-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply.
+func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. + return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (reply interface{}, err error) { + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. + // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. 
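The pending bookkeeping described in these comments is what makes pipelining safe: Send queues commands client-side, Flush pushes the batch in one write, and Receive (or a trailing Do) reads the replies back in send order. A sketch, again assuming a local server:

```go
package main

import (
	"fmt"

	"github.com/garyburd/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", ":6379")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Queue three commands client-side; nothing hits the wire yet.
	c.Send("SET", "k", "v")
	c.Send("GET", "k")
	c.Send("DEL", "k")

	// One buffered write for the whole batch.
	if err := c.Flush(); err != nil {
		panic(err)
	}

	// Replies come back in send order.
	for i := 0; i < 3; i++ {
		reply, err := c.Receive()
		if err != nil {
			panic(err)
		}
		fmt.Printf("reply %d: %v\n", i, reply)
	}
}
```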
+ c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + + if cmd != "" { + c.writeCommand(cmd, args) + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { + err = e + } + } + return reply, err +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go new file mode 100644 index 000000000000..800370136eb6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go @@ -0,0 +1,542 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "bufio" + "bytes" + "math" + "net" + "reflect" + "strings" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +var writeTests = []struct { + args []interface{} + expected string +}{ + { + []interface{}{"SET", "key", "value"}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", + }, + { + []interface{}{"SET", "key", "value"}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", + }, + { + []interface{}{"SET", "key", byte(100)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", + }, + { + []interface{}{"SET", "key", 100}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", + }, + { + []interface{}{"SET", "key", int64(math.MinInt64)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$20\r\n-9223372036854775808\r\n", + }, + { + []interface{}{"SET", "key", float64(1349673917.939762)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$21\r\n1.349673917939762e+09\r\n", + }, + { + []interface{}{"SET", "key", ""}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", + }, + { + []interface{}{"SET", "key", nil}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", + }, + { + []interface{}{"ECHO", true, false}, + "*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n", + }, +} + +func TestWrite(t *testing.T) { + for _, tt := range writeTests { + var buf bytes.Buffer + rw := bufio.ReadWriter{Writer: bufio.NewWriter(&buf)} + c := redis.NewConnBufio(rw) + err := c.Send(tt.args[0].(string), tt.args[1:]...) 
+		if err != nil {
+			t.Errorf("Send(%v) returned error %v", tt.args, err)
+			continue
+		}
+		rw.Flush()
+		actual := buf.String()
+		if actual != tt.expected {
+			t.Errorf("Send(%v) = %q, want %q", tt.args, actual, tt.expected)
+		}
+	}
+}
+
+var errorSentinel = &struct{}{}
+
+var readTests = []struct {
+	reply    string
+	expected interface{}
+}{
+	{
+		"+OK\r\n",
+		"OK",
+	},
+	{
+		"+PONG\r\n",
+		"PONG",
+	},
+	{
+		"@OK\r\n",
+		errorSentinel,
+	},
+	{
+		"$6\r\nfoobar\r\n",
+		[]byte("foobar"),
+	},
+	{
+		"$-1\r\n",
+		nil,
+	},
+	{
+		":1\r\n",
+		int64(1),
+	},
+	{
+		":-2\r\n",
+		int64(-2),
+	},
+	{
+		"*0\r\n",
+		[]interface{}{},
+	},
+	{
+		"*-1\r\n",
+		nil,
+	},
+	{
+		"*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n",
+		[]interface{}{[]byte("foo"), []byte("bar"), []byte("Hello"), []byte("World")},
+	},
+	{
+		"*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n",
+		[]interface{}{[]byte("foo"), nil, []byte("bar")},
+	},
+
+	{
+		// "x" is not a valid length
+		"$x\r\nfoobar\r\n",
+		errorSentinel,
+	},
+	{
+		// -2 is not a valid length
+		"$-2\r\n",
+		errorSentinel,
+	},
+	{
+		// "x" is not a valid integer
+		":x\r\n",
+		errorSentinel,
+	},
+	{
+		// missing \r\n following value
+		"$6\r\nfoobar",
+		errorSentinel,
+	},
+	{
+		// short value
+		"$6\r\nxx",
+		errorSentinel,
+	},
+	{
+		// long value
+		"$6\r\nfoobarx\r\n",
+		errorSentinel,
+	},
+}
+
+func TestRead(t *testing.T) {
+	for _, tt := range readTests {
+		rw := bufio.ReadWriter{
+			Reader: bufio.NewReader(strings.NewReader(tt.reply)),
+			Writer: bufio.NewWriter(nil), // writer needs to support Flush
+		}
+		c := redis.NewConnBufio(rw)
+		actual, err := c.Receive()
+		if tt.expected == errorSentinel {
+			if err == nil {
+				t.Errorf("Receive(%q) did not return expected error", tt.reply)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("Receive(%q) returned error %v", tt.reply, err)
+				continue
+			}
+			if !reflect.DeepEqual(actual, tt.expected) {
+				t.Errorf("Receive(%q) = %v, want %v", tt.reply, actual, tt.expected)
+			}
+		}
+	}
+}
+
+var testCommands = []struct {
+	args     []interface{}
+	expected interface{}
+}{
+	{
+		[]interface{}{"PING"},
+		"PONG",
+	},
+	{
+		[]interface{}{"SET", "foo", "bar"},
+		"OK",
+	},
+	{
+		[]interface{}{"GET", "foo"},
+		[]byte("bar"),
+	},
+	{
+		[]interface{}{"GET", "nokey"},
+		nil,
+	},
+	{
+		[]interface{}{"MGET", "nokey", "foo"},
+		[]interface{}{nil, []byte("bar")},
+	},
+	{
+		[]interface{}{"INCR", "mycounter"},
+		int64(1),
+	},
+	{
+		[]interface{}{"LPUSH", "mylist", "foo"},
+		int64(1),
+	},
+	{
+		[]interface{}{"LPUSH", "mylist", "bar"},
+		int64(2),
+	},
+	{
+		[]interface{}{"LRANGE", "mylist", 0, -1},
+		[]interface{}{[]byte("bar"), []byte("foo")},
+	},
+	{
+		[]interface{}{"MULTI"},
+		"OK",
+	},
+	{
+		[]interface{}{"LRANGE", "mylist", 0, -1},
+		"QUEUED",
+	},
+	{
+		[]interface{}{"PING"},
+		"QUEUED",
+	},
+	{
+		[]interface{}{"EXEC"},
+		[]interface{}{
+			[]interface{}{[]byte("bar"), []byte("foo")},
+			"PONG",
+		},
+	},
+}
+
+func TestDoCommands(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+
+	for _, cmd := range testCommands {
+		actual, err := c.Do(cmd.args[0].(string), cmd.args[1:]...)
+		if err != nil {
+			t.Errorf("Do(%v) returned error %v", cmd.args, err)
+			continue
+		}
+		if !reflect.DeepEqual(actual, cmd.expected) {
+			t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected)
+		}
+	}
+}
+
+func TestPipelineCommands(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+
+	for _, cmd := range testCommands {
+		if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil {
+			t.Fatalf("Send(%v) returned error %v", cmd.args, err)
+		}
+	}
+	if err := c.Flush(); err != nil {
+		t.Errorf("Flush() returned error %v", err)
+	}
+	for _, cmd := range testCommands {
+		actual, err := c.Receive()
+		if err != nil {
+			t.Fatalf("Receive(%v) returned error %v", cmd.args, err)
+		}
+		if !reflect.DeepEqual(actual, cmd.expected) {
+			t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected)
+		}
+	}
+}
+
+func TestBlankCommand(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+
+	for _, cmd := range testCommands {
+		if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil {
+			t.Fatalf("Send(%v) returned error %v", cmd.args, err)
+		}
+	}
+	reply, err := redis.Values(c.Do(""))
+	if err != nil {
+		t.Fatalf("Do() returned error %v", err)
+	}
+	if len(reply) != len(testCommands) {
+		t.Fatalf("len(reply)=%d, want %d", len(reply), len(testCommands))
+	}
+	for i, cmd := range testCommands {
+		actual := reply[i]
+		if !reflect.DeepEqual(actual, cmd.expected) {
+			t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected)
+		}
+	}
+}
+
+func TestRecvBeforeSend(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+	done := make(chan struct{})
+	go func() {
+		c.Receive()
+		close(done)
+	}()
+	time.Sleep(time.Millisecond)
+	c.Send("PING")
+	c.Flush()
+	<-done
+	_, err = c.Do("")
+	if err != nil {
+		t.Fatalf("error=%v", err)
+	}
+}
+
+func TestError(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+
+	c.Do("SET", "key", "val")
+	_, err = c.Do("HSET", "key", "fld", "val")
+	if err == nil {
+		t.Errorf("Expected err for HSET on string key.")
+	}
+	if c.Err() != nil {
+		t.Errorf("Conn has Err()=%v, expect nil", c.Err())
+	}
+	_, err = c.Do("SET", "key", "val")
+	if err != nil {
+		t.Errorf("Do(SET, key, val) returned error %v, expected nil.", err)
+	}
+}
+
+func TestReadDeadline(t *testing.T) {
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("net.Listen returned %v", err)
+	}
+	defer l.Close()
+
+	go func() {
+		for {
+			c, err := l.Accept()
+			if err != nil {
+				return
+			}
+			go func() {
+				time.Sleep(time.Second)
+				c.Write([]byte("+OK\r\n"))
+				c.Close()
+			}()
+		}
+	}()
+
+	c1, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0)
+	if err != nil {
+		t.Fatalf("redis.Dial returned %v", err)
+	}
+	defer c1.Close()
+
+	_, err = c1.Do("PING")
+	if err == nil {
+		t.Fatalf("c1.Do() returned nil, expect error")
+	}
+	if c1.Err() == nil {
+		t.Fatalf("c1.Err() = nil, expect error")
+	}
+
+	c2, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0)
+	if err != nil {
+		t.Fatalf("redis.Dial returned %v", err)
+	}
+	defer c2.Close()
+
+	c2.Send("PING")
+	c2.Flush()
+	_, err = c2.Receive()
+	if err == nil {
+		t.Fatalf("c2.Receive() returned nil, expect error")
+	}
+	if c2.Err() == nil {
+		t.Fatalf("c2.Err() = nil, expect error")
+	}
+}
+
+// Connect to local instance of Redis running on the default port.
+func ExampleDial() {
+	c, err := redis.Dial("tcp", ":6379")
+	if err != nil {
+		// handle error
+	}
+	defer c.Close()
+}
+
+// TestExecError tests handling of errors in a transaction. See
+// http://redis.io/topics/transactions for information on how Redis handles
+// errors in a transaction.
+func TestExecError(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+
+	// Execute commands that fail before EXEC is called.
+
+	c.Do("ZADD", "k0", 0, 0)
+	c.Send("MULTI")
+	c.Send("NOTACOMMAND", "k0", 0, 0)
+	c.Send("ZINCRBY", "k0", 0, 0)
+	v, err := c.Do("EXEC")
+	if err == nil {
+		t.Fatalf("EXEC returned values %v, expected error", v)
+	}
+
+	// Execute commands that fail after EXEC is called. The first command
+	// returns an error.
+
+	c.Do("ZADD", "k1", 0, 0)
+	c.Send("MULTI")
+	c.Send("HSET", "k1", 0, 0)
+	c.Send("ZINCRBY", "k1", 0, 0)
+	v, err = c.Do("EXEC")
+	if err != nil {
+		t.Fatalf("EXEC returned error %v", err)
+	}
+
+	vs, err := redis.Values(v, nil)
+	if err != nil {
+		t.Fatalf("Values(v) returned error %v", err)
+	}
+
+	if len(vs) != 2 {
+		t.Fatalf("len(vs) == %d, want 2", len(vs))
+	}
+
+	if _, ok := vs[0].(error); !ok {
+		t.Fatalf("first result is type %T, expected error", vs[0])
+	}
+
+	if _, ok := vs[1].([]byte); !ok {
+		t.Fatalf("second result is type %T, expected []byte", vs[1])
+	}
+
+	// Execute commands that fail after EXEC is called. The second command
+	// returns an error.
+
+	c.Do("ZADD", "k2", 0, 0)
+	c.Send("MULTI")
+	c.Send("ZINCRBY", "k2", 0, 0)
+	c.Send("HSET", "k2", 0, 0)
+	v, err = c.Do("EXEC")
+	if err != nil {
+		t.Fatalf("EXEC returned error %v", err)
+	}
+
+	vs, err = redis.Values(v, nil)
+	if err != nil {
+		t.Fatalf("Values(v) returned error %v", err)
+	}
+
+	if len(vs) != 2 {
+		t.Fatalf("len(vs) == %d, want 2", len(vs))
+	}
+
+	if _, ok := vs[0].([]byte); !ok {
+		t.Fatalf("first result is type %T, expected []byte", vs[0])
+	}
+
+	if _, ok := vs[1].(error); !ok {
+		t.Fatalf("second result is type %T, expected error", vs[1])
+	}
+}
+
+func BenchmarkDoEmpty(b *testing.B) {
+	b.StopTimer()
+	c, err := redistest.Dial()
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer c.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := c.Do(""); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkDoPing(b *testing.B) {
+	b.StopTimer()
+	c, err := redistest.Dial()
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer c.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := c.Do("PING"); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go
new file mode 100644
index 000000000000..1ae6f0cc2a43
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go
@@ -0,0 +1,169 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+//	Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+//	n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to binary strings for transmission
+// to the server as follows:
+//
+//	Go Type                 Conversion
+//	[]byte                  Sent as is
+//	string                  Sent as is
+//	int, int64              strconv.FormatInt(v, 10)
+//	float64                 strconv.FormatFloat(v, 'g', -1, 64)
+//	bool                    true -> "1", false -> "0"
+//	nil                     ""
+//	all other types         fmt.Print(v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+//	Redis type              Go type
+//	error                   redis.Error
+//	integer                 int64
+//	simple string           string
+//	bulk string             []byte or nil if value not present.
+//	array                   []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+//	Send(commandName string, args ...interface{}) error
+//	Flush() error
+//	Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+//	c.Send("SET", "foo", "bar")
+//	c.Send("GET", "foo")
+//	c.Flush()
+//	c.Receive()          // reply from SET
+//	v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+//	c.Send("MULTI")
+//	c.Send("INCR", "foo")
+//	c.Send("INCR", "bar")
+//	r, err := c.Do("EXEC")
+//	fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections do not support concurrent calls to the write methods (Send,
+// Flush) or concurrent calls to the read method (Receive). Connections do
+// allow a concurrent reader and writer.
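+//
+// For example, a minimal sketch of a concurrent reader and writer on a single
+// connection (c is a Conn, as in the examples above):
+//
+//	done := make(chan error, 1)
+//	go func() {
+//		// reader goroutine
+//		_, err := c.Receive()
+//		done <- err
+//	}()
+//	// the current goroutine keeps writing
+//	c.Send("PING")
+//	c.Flush()
+//	err := <-done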
+//
+// Because the Do method combines the functionality of Send, Flush and Receive,
+// the Do method cannot be called concurrently with the other methods.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get and
+// release connections from within a goroutine.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+//	c.Send("SUBSCRIBE", "example")
+//	c.Flush()
+//	for {
+//		reply, err := c.Receive()
+//		if err != nil {
+//			return err
+//		}
+//		// process pushed message
+//	}
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The Receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+//	psc := redis.PubSubConn{Conn: c}
+//	psc.Subscribe("example")
+//	for {
+//		switch v := psc.Receive().(type) {
+//		case redis.Message:
+//			fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+//		case redis.Subscription:
+//			fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+//		case error:
+//			return v
+//		}
+//	}
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error. If the error is non-nil, then the helper function returns the
+// error. If the error is nil, the function converts the reply to the specified
+// type:
+//
+//	exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+//	if err != nil {
+//		// handle error return from c.Do or type conversion error.
+//	}
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+//	var value1 int
+//	var value2 string
+//	reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+//		// handle error
+//	}
+package redis
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go
new file mode 100644
index 000000000000..129b86d6708e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+)
+
+// NewLoggingConn returns a logging wrapper around a connection.
+func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
+	if prefix != "" {
+		prefix = prefix + "."
+ } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) + c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go new file mode 100644 index 000000000000..9daf2e33ff38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go @@ -0,0 +1,389 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "container/list" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" + + "github.com/garyburd/redigo/internal" +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. 
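+//
+// A sketch of checking for this error in application code (pool here is a
+// *Pool with MaxActive set and Wait false, as in the Pool example below):
+//
+//	c := pool.Get()
+//	defer c.Close()
+//	if _, err := c.Do("PING"); err == redis.ErrPoolExhausted {
+//		// the pool is at its MaxActive limit; back off and retry
+//	}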
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
+
+var (
+	errPoolClosed = errors.New("redigo: connection pool closed")
+	errConnClosed = errors.New("redigo: connection closed")
+)
+
+// Pool maintains a pool of connections. The application calls the Get method
+// to get a connection from the pool and the connection's Close method to
+// return the connection's resources to the pool.
+//
+// The following example shows how to use a pool in a web application. The
+// application creates a pool at application startup and makes it available to
+// request handlers using a global variable.
+//
+//	func newPool(server, password string) *redis.Pool {
+//		return &redis.Pool{
+//			MaxIdle:     3,
+//			IdleTimeout: 240 * time.Second,
+//			Dial: func() (redis.Conn, error) {
+//				c, err := redis.Dial("tcp", server)
+//				if err != nil {
+//					return nil, err
+//				}
+//				if _, err := c.Do("AUTH", password); err != nil {
+//					c.Close()
+//					return nil, err
+//				}
+//				return c, err
+//			},
+//			TestOnBorrow: func(c redis.Conn, t time.Time) error {
+//				_, err := c.Do("PING")
+//				return err
+//			},
+//		}
+//	}
+//
+//	var (
+//		pool          *redis.Pool
+//		redisServer   = flag.String("redisServer", ":6379", "")
+//		redisPassword = flag.String("redisPassword", "", "")
+//	)
+//
+//	func main() {
+//		flag.Parse()
+//		pool = newPool(*redisServer, *redisPassword)
+//		...
+//	}
+//
+// A request handler gets a connection from the pool and closes the connection
+// when the handler is done:
+//
+//	func serveHome(w http.ResponseWriter, r *http.Request) {
+//		conn := pool.Get()
+//		defer conn.Close()
+//		....
+//	}
+//
+type Pool struct {
+
+	// Dial is an application supplied function for creating and configuring a
+	// connection.
+	Dial func() (Conn, error)
+
+	// TestOnBorrow is an optional application supplied function for checking
+	// the health of an idle connection before the connection is used again by
+	// the application. Argument t is the time that the connection was returned
+	// to the pool. If the function returns an error, then the connection is
+	// closed.
+	TestOnBorrow func(c Conn, t time.Time) error
+
+	// Maximum number of idle connections in the pool.
+	MaxIdle int
+
+	// Maximum number of connections allocated by the pool at a given time.
+	// When zero, there is no limit on the number of connections in the pool.
+	MaxActive int
+
+	// Close connections after remaining idle for this duration. If the value
+	// is zero, then idle connections are not closed. Applications should set
+	// the timeout to a value less than the server's timeout.
+	IdleTimeout time.Duration
+
+	// If Wait is true and the pool is at the MaxActive limit, then Get() waits
+	// for a connection to be returned to the pool before returning.
+	Wait bool
+
+	// mu protects fields defined below.
+	mu     sync.Mutex
+	cond   *sync.Cond
+	closed bool
+	active int
+
+	// Stack of idleConn with most recently used at the front.
+	idle list.List
+}
+
+type idleConn struct {
+	c Conn
+	t time.Time
+}
+
+// NewPool creates a new pool. This function is deprecated. Applications should
+// initialize the Pool fields directly as shown in the example.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+	return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer
+// error handling to the first use of the connection.
If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + c, err := p.get() + if err != nil { + return errorConnection{err} + } + return &pooledConnection{p: p, c: c} +} + +// ActiveCount returns the number of active connections in the pool. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// Close releases the resources used by the pool. +func (p *Pool) Close() error { + p.mu.Lock() + idle := p.idle + p.idle.Init() + p.closed = true + p.active -= idle.Len() + if p.cond != nil { + p.cond.Broadcast() + } + p.mu.Unlock() + for e := idle.Front(); e != nil; e = e.Next() { + e.Value.(idleConn).c.Close() + } + return nil +} + +// release decrements the active count and signals waiters. The caller must +// hold p.mu during the call. +func (p *Pool) release() { + p.active -= 1 + if p.cond != nil { + p.cond.Signal() + } +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get() (Conn, error) { + p.mu.Lock() + + // Prune stale connections. + + if timeout := p.IdleTimeout; timeout > 0 { + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Back() + if e == nil { + break + } + ic := e.Value.(idleConn) + if ic.t.Add(timeout).After(nowFunc()) { + break + } + p.idle.Remove(e) + p.release() + p.mu.Unlock() + ic.c.Close() + p.mu.Lock() + } + } + + for { + + // Get idle connection. + + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Front() + if e == nil { + break + } + ic := e.Value.(idleConn) + p.idle.Remove(e) + test := p.TestOnBorrow + p.mu.Unlock() + if test == nil || test(ic.c, ic.t) == nil { + return ic.c, nil + } + ic.c.Close() + p.mu.Lock() + p.release() + } + + // Check for pool closed before dialing a new connection. + + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Dial new connection if under limit. + + if p.MaxActive == 0 || p.active < p.MaxActive { + dial := p.Dial + p.active += 1 + p.mu.Unlock() + c, err := dial() + if err != nil { + p.mu.Lock() + p.release() + p.mu.Unlock() + c = nil + } + return c, err + } + + if !p.Wait { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + if p.cond == nil { + p.cond = sync.NewCond(&p.mu) + } + p.cond.Wait() + } +} + +func (p *Pool) put(c Conn, forceClose bool) error { + err := c.Err() + p.mu.Lock() + if !p.closed && err == nil && !forceClose { + p.idle.PushFront(idleConn{t: nowFunc(), c: c}) + if p.idle.Len() > p.MaxIdle { + c = p.idle.Remove(p.idle.Back()).(idleConn).c + } else { + c = nil + } + } + + if c == nil { + if p.cond != nil { + p.cond.Signal() + } + p.mu.Unlock() + return nil + } + + p.release() + p.mu.Unlock() + return c.Close() +} + +type pooledConnection struct { + p *Pool + c Conn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. 
Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (pc *pooledConnection) Close() error { + c := pc.c + if _, ok := c.(errorConnection); ok { + return nil + } + pc.c = errorConnection{errConnClosed} + + if pc.state&internal.MultiState != 0 { + c.Send("DISCARD") + pc.state &^= (internal.MultiState | internal.WatchState) + } else if pc.state&internal.WatchState != 0 { + c.Send("UNWATCH") + pc.state &^= internal.WatchState + } + if pc.state&internal.SubscribeState != 0 { + c.Send("UNSUBSCRIBE") + c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. + sentinelOnce.Do(initSentinel) + c.Send("ECHO", sentinel) + c.Flush() + for { + p, err := c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + pc.state &^= internal.SubscribeState + break + } + } + } + c.Do("") + pc.p.put(c, pc.state != 0) + return nil +} + +func (pc *pooledConnection) Err() error { + return pc.c.Err() +} + +func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) +} + +func (pc *pooledConnection) Flush() error { + return pc.c.Flush() +} + +func (pc *pooledConnection) Receive() (reply interface{}, err error) { + return pc.c.Receive() +} + +type errorConnection struct{ err error } + +func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } +func (ec errorConnection) Err() error { return ec.err } +func (ec errorConnection) Close() error { return ec.err } +func (ec errorConnection) Flush() error { return ec.err } +func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go new file mode 100644 index 000000000000..1fe305f16859 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go @@ -0,0 +1,674 @@ +// Copyright 2011 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
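+
+// The tests in this file exercise a live server through redistest.Dial, an
+// internal helper that is assumed to dial a locally running Redis instance
+// reserved for testing.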
+ +package redis_test + +import ( + "errors" + "io" + "reflect" + "sync" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +type poolTestConn struct { + d *poolDialer + err error + redis.Conn +} + +func (c *poolTestConn) Close() error { c.d.open -= 1; return nil } +func (c *poolTestConn) Err() error { return c.err } + +func (c *poolTestConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + if commandName == "ERR" { + c.err = args[0].(error) + commandName = "PING" + } + if commandName != "" { + c.d.commands = append(c.d.commands, commandName) + } + return c.Conn.Do(commandName, args...) +} + +func (c *poolTestConn) Send(commandName string, args ...interface{}) error { + c.d.commands = append(c.d.commands, commandName) + return c.Conn.Send(commandName, args...) +} + +type poolDialer struct { + t *testing.T + dialed int + open int + commands []string + dialErr error +} + +func (d *poolDialer) dial() (redis.Conn, error) { + d.dialed += 1 + if d.dialErr != nil { + return nil, d.dialErr + } + c, err := redistest.Dial() + if err != nil { + return nil, err + } + d.open += 1 + return &poolTestConn{d: d, Conn: c}, nil +} + +func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) { + if d.dialed != dialed { + d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed) + } + if d.open != open { + d.t.Errorf("%s: open=%d, want %d", message, d.open, open) + } + if active := p.ActiveCount(); active != open { + d.t.Errorf("%s: active=%d, want %d", message, active, open) + } +} + +func TestPoolReuse(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + for i := 0; i < 10; i++ { + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c1.Close() + c2.Close() + } + + d.check("before close", p, 2, 2) + p.Close() + d.check("after close", p, 2, 0) +} + +func TestPoolMaxIdle(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + for i := 0; i < 10; i++ { + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c3 := p.Get() + c3.Do("PING") + c1.Close() + c2.Close() + c3.Close() + } + d.check("before close", p, 12, 2) + p.Close() + d.check("after close", p, 12, 0) +} + +func TestPoolError(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + c := p.Get() + c.Do("ERR", io.EOF) + if c.Err() == nil { + t.Errorf("expected c.Err() != nil") + } + c.Close() + + c = p.Get() + c.Do("ERR", io.EOF) + c.Close() + + d.check(".", p, 2, 0) +} + +func TestPoolClose(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c3 := p.Get() + c3.Do("PING") + + c1.Close() + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after connection closed") + } + + c2.Close() + c2.Close() + + p.Close() + + d.check("after pool close", p, 3, 1) + + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after connection and pool closed") + } + + c3.Close() + + d.check("after conn close", p, 3, 0) + + c1 = p.Get() + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after pool closed") + } +} + +func TestPoolTimeout(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + IdleTimeout: 300 * time.Second, + Dial: d.dial, + } + + now := time.Now() + redis.SetNowFunc(func() time.Time { return now }) + defer 
redis.SetNowFunc(time.Now)
+
+	c := p.Get()
+	c.Do("PING")
+	c.Close()
+
+	d.check("1", p, 1, 1)
+
+	now = now.Add(p.IdleTimeout)
+
+	c = p.Get()
+	c.Do("PING")
+	c.Close()
+
+	d.check("2", p, 2, 1)
+
+	p.Close()
+}
+
+func TestPoolConcurrentSendReceive(t *testing.T) {
+	p := &redis.Pool{
+		Dial: redistest.Dial,
+	}
+	c := p.Get()
+	done := make(chan error, 1)
+	go func() {
+		_, err := c.Receive()
+		done <- err
+	}()
+	c.Send("PING")
+	c.Flush()
+	err := <-done
+	if err != nil {
+		t.Fatalf("Receive() returned error %v", err)
+	}
+	_, err = c.Do("")
+	if err != nil {
+		t.Fatalf("Do() returned error %v", err)
+	}
+	c.Close()
+	p.Close()
+}
+
+func TestPoolBorrowCheck(t *testing.T) {
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:      2,
+		Dial:         d.dial,
+		TestOnBorrow: func(redis.Conn, time.Time) error { return redis.Error("BLAH") },
+	}
+
+	for i := 0; i < 10; i++ {
+		c := p.Get()
+		c.Do("PING")
+		c.Close()
+	}
+	d.check("1", p, 10, 1)
+	p.Close()
+}
+
+func TestPoolMaxActive(t *testing.T) {
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   2,
+		MaxActive: 2,
+		Dial:      d.dial,
+	}
+	c1 := p.Get()
+	c1.Do("PING")
+	c2 := p.Get()
+	c2.Do("PING")
+
+	d.check("1", p, 2, 2)
+
+	c3 := p.Get()
+	if _, err := c3.Do("PING"); err != redis.ErrPoolExhausted {
+		t.Errorf("expected pool exhausted")
+	}
+
+	c3.Close()
+	d.check("2", p, 2, 2)
+	c2.Close()
+	d.check("3", p, 2, 2)
+
+	c3 = p.Get()
+	if _, err := c3.Do("PING"); err != nil {
+		t.Errorf("expected good connection, err=%v", err)
+	}
+	c3.Close()
+
+	d.check("4", p, 2, 2)
+	p.Close()
+}
+
+func TestPoolMonitorCleanup(t *testing.T) {
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   2,
+		MaxActive: 2,
+		Dial:      d.dial,
+	}
+	c := p.Get()
+	c.Send("MONITOR")
+	c.Close()
+
+	d.check("", p, 1, 0)
+	p.Close()
+}
+
+func TestPoolPubSubCleanup(t *testing.T) {
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   2,
+		MaxActive: 2,
+		Dial:      d.dial,
+	}
+
+	c := p.Get()
+	c.Send("SUBSCRIBE", "x")
+	c.Close()
+
+	want := []string{"SUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
+	if !reflect.DeepEqual(d.commands, want) {
+		t.Errorf("got commands %v, want %v", d.commands, want)
+	}
+	d.commands = nil
+
+	c = p.Get()
+	c.Send("PSUBSCRIBE", "x*")
+	c.Close()
+
+	want = []string{"PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
+	if !reflect.DeepEqual(d.commands, want) {
+		t.Errorf("got commands %v, want %v", d.commands, want)
+	}
+	d.commands = nil
+
+	p.Close()
+}
+
+func TestPoolTransactionCleanup(t *testing.T) {
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   2,
+		MaxActive: 2,
+		Dial:      d.dial,
+	}
+
+	c := p.Get()
+	c.Do("WATCH", "key")
+	c.Do("PING")
+	c.Close()
+
+	want := []string{"WATCH", "PING", "UNWATCH"}
+	if !reflect.DeepEqual(d.commands, want) {
+		t.Errorf("got commands %v, want %v", d.commands, want)
+	}
+	d.commands = nil
+
+	c = p.Get()
+	c.Do("WATCH", "key")
+	c.Do("UNWATCH")
+	c.Do("PING")
+	c.Close()
+
+	want = []string{"WATCH", "UNWATCH", "PING"}
+	if !reflect.DeepEqual(d.commands, want) {
+		t.Errorf("got commands %v, want %v", d.commands, want)
+	}
+	d.commands = nil
+
+	c = p.Get()
+	c.Do("WATCH", "key")
+	c.Do("MULTI")
+	c.Do("PING")
+	c.Close()
+
+	want = []string{"WATCH", "MULTI", "PING", "DISCARD"}
+	if !reflect.DeepEqual(d.commands, want) {
+		t.Errorf("got commands %v, want %v", d.commands, want)
+	}
+	d.commands = nil
+
+	c = p.Get()
+	c.Do("WATCH", "key")
+	c.Do("MULTI")
+	c.Do("DISCARD")
+	c.Do("PING")
+	c.Close()
+
+	want = []string{"WATCH", "MULTI", "DISCARD", "PING"}
+	if !reflect.DeepEqual(d.commands, want) {
+		t.Errorf("got commands %v, want %v", d.commands, want)
+	}
+	d.commands = nil
+
+	c = p.Get()
+	c.Do("WATCH", "key")
+	c.Do("MULTI")
+	c.Do("EXEC")
+	c.Do("PING")
+	c.Close()
+
+	want = []string{"WATCH", "MULTI", "EXEC", "PING"}
+	if !reflect.DeepEqual(d.commands, want) {
+		t.Errorf("got commands %v, want %v", d.commands, want)
+	}
+	d.commands = nil
+
+	p.Close()
+}
+
+func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error {
+	errs := make(chan error, 10)
+	for i := 0; i < cap(errs); i++ {
+		go func() {
+			c := p.Get()
+			_, err := c.Do(cmd, args...)
+			errs <- err
+			c.Close()
+		}()
+	}
+
+	// Wait for goroutines to block.
+	time.Sleep(time.Second / 4)
+
+	return errs
+}
+
+func TestWaitPool(t *testing.T) {
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   1,
+		MaxActive: 1,
+		Dial:      d.dial,
+		Wait:      true,
+	}
+	defer p.Close()
+	c := p.Get()
+	errs := startGoroutines(p, "PING")
+	d.check("before close", p, 1, 1)
+	c.Close()
+	timeout := time.After(2 * time.Second)
+	for i := 0; i < cap(errs); i++ {
+		select {
+		case err := <-errs:
+			if err != nil {
+				t.Fatal(err)
+			}
+		case <-timeout:
+			t.Fatalf("timeout waiting for blocked goroutine %d", i)
+		}
+	}
+	d.check("done", p, 1, 1)
+}
+
+func TestWaitPoolClose(t *testing.T) {
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   1,
+		MaxActive: 1,
+		Dial:      d.dial,
+		Wait:      true,
+	}
+	c := p.Get()
+	if _, err := c.Do("PING"); err != nil {
+		t.Fatal(err)
+	}
+	errs := startGoroutines(p, "PING")
+	d.check("before close", p, 1, 1)
+	p.Close()
+	timeout := time.After(2 * time.Second)
+	for i := 0; i < cap(errs); i++ {
+		select {
+		case err := <-errs:
+			switch err {
+			case nil:
+				t.Fatal("blocked goroutine did not get error")
+			case redis.ErrPoolExhausted:
+				t.Fatal("blocked goroutine got pool exhausted error")
+			}
+		case <-timeout:
+			t.Fatal("timeout waiting for blocked goroutine")
+		}
+	}
+	c.Close()
+	d.check("done", p, 1, 0)
+}
+
+func TestWaitPoolCommandError(t *testing.T) {
+	testErr := errors.New("test")
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   1,
+		MaxActive: 1,
+		Dial:      d.dial,
+		Wait:      true,
+	}
+	defer p.Close()
+	c := p.Get()
+	errs := startGoroutines(p, "ERR", testErr)
+	d.check("before close", p, 1, 1)
+	c.Close()
+	timeout := time.After(2 * time.Second)
+	for i := 0; i < cap(errs); i++ {
+		select {
+		case err := <-errs:
+			if err != nil {
+				t.Fatal(err)
+			}
+		case <-timeout:
+			t.Fatalf("timeout waiting for blocked goroutine %d", i)
+		}
+	}
+	d.check("done", p, cap(errs), 0)
+}
+
+func TestWaitPoolDialError(t *testing.T) {
+	testErr := errors.New("test")
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   1,
+		MaxActive: 1,
+		Dial:      d.dial,
+		Wait:      true,
+	}
+	defer p.Close()
+	c := p.Get()
+	errs := startGoroutines(p, "ERR", testErr)
+	d.check("before close", p, 1, 1)
+
+	d.dialErr = errors.New("dial")
+	c.Close()
+
+	nilCount := 0
+	errCount := 0
+	timeout := time.After(2 * time.Second)
+	for i := 0; i < cap(errs); i++ {
+		select {
+		case err := <-errs:
+			switch err {
+			case nil:
+				nilCount++
+			case d.dialErr:
+				errCount++
+			default:
+				t.Fatalf("expected dial error or nil, got %v", err)
+			}
+		case <-timeout:
+			t.Fatalf("timeout waiting for blocked goroutine %d", i)
+		}
+	}
+	if nilCount != 1 {
+		t.Errorf("expected one nil error, got %d", nilCount)
+	}
+	if errCount != cap(errs)-1 {
+		t.Errorf("expected %d dial errors, got %d", cap(errs)-1, errCount)
+	}
+	d.check("done", p, cap(errs), 0)
+}
+
+// Borrowing requires us to iterate over the idle connections, unlock the pool,
+// and perform a
+// blocking operation to check the connection still works. If TestOnBorrow
+// fails, we must reacquire the lock and continue iteration. This test ensures
+// that iteration will work correctly if multiple threads are iterating
+// simultaneously.
+func TestLocking_TestOnBorrowFails_PoolDoesntCrash(t *testing.T) {
+	count := 100
+
+	// First we'll create a pool where the pilfering of idle connections fails.
+	d := poolDialer{t: t}
+	p := &redis.Pool{
+		MaxIdle:   count,
+		MaxActive: count,
+		Dial:      d.dial,
+		TestOnBorrow: func(c redis.Conn, t time.Time) error {
+			return errors.New("No way back into the real world.")
+		},
+	}
+	defer p.Close()
+
+	// Fill the pool with idle connections.
+	b1 := sync.WaitGroup{}
+	b1.Add(count)
+	b2 := sync.WaitGroup{}
+	b2.Add(count)
+	for i := 0; i < count; i++ {
+		go func() {
+			c := p.Get()
+			if c.Err() != nil {
+				t.Errorf("pool get failed: %v", c.Err())
+			}
+			b1.Done()
+			b1.Wait()
+			c.Close()
+			b2.Done()
+		}()
+	}
+	b2.Wait()
+	if d.dialed != count {
+		t.Errorf("Expected %d dials, got %d", count, d.dialed)
+	}
+
+	// Spawn a bunch of goroutines to thrash the pool.
+	b2.Add(count)
+	for i := 0; i < count; i++ {
+		go func() {
+			c := p.Get()
+			if c.Err() != nil {
+				t.Errorf("pool get failed: %v", c.Err())
+			}
+			c.Close()
+			b2.Done()
+		}()
+	}
+	b2.Wait()
+	if d.dialed != count*2 {
+		t.Errorf("Expected %d dials, got %d", count*2, d.dialed)
+	}
+}
+
+func BenchmarkPoolGet(b *testing.B) {
+	b.StopTimer()
+	p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2}
+	c := p.Get()
+	if err := c.Err(); err != nil {
+		b.Fatal(err)
+	}
+	c.Close()
+	defer p.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		c = p.Get()
+		c.Close()
+	}
+}
+
+func BenchmarkPoolGetErr(b *testing.B) {
+	b.StopTimer()
+	p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2}
+	c := p.Get()
+	if err := c.Err(); err != nil {
+		b.Fatal(err)
+	}
+	c.Close()
+	defer p.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		c = p.Get()
+		if err := c.Err(); err != nil {
+			b.Fatal(err)
+		}
+		c.Close()
+	}
+}
+
+func BenchmarkPoolGetPing(b *testing.B) {
+	b.StopTimer()
+	p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2}
+	c := p.Get()
+	if err := c.Err(); err != nil {
+		b.Fatal(err)
+	}
+	c.Close()
+	defer p.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		c = p.Get()
+		if _, err := c.Do("PING"); err != nil {
+			b.Fatal(err)
+		}
+		c.Close()
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go
new file mode 100644
index 000000000000..f0790429fd06
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go
@@ -0,0 +1,129 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+)
+
+// Subscription represents a subscribe or unsubscribe notification.
+type Subscription struct {
+
+	// Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
+	Kind string
+
+	// The channel that was changed.
+	Channel string
+
+	// The current number of subscriptions for the connection.
+	Count int
+}
+
+// Message represents a message notification.
+type Message struct {
+
+	// The originating channel.
+	Channel string
+
+	// The message data.
+	Data []byte
+}
+
+// PMessage represents a pmessage notification.
+type PMessage struct {
+
+	// The matched pattern.
+	Pattern string
+
+	// The originating channel.
+	Channel string
+
+	// The message data.
+	Data []byte
+}
+
+// PubSubConn wraps a Conn with convenience methods for subscribers.
+type PubSubConn struct {
+	Conn Conn
+}
+
+// Close closes the connection.
+func (c PubSubConn) Close() error {
+	return c.Conn.Close()
+}
+
+// Subscribe subscribes the connection to the specified channels.
+func (c PubSubConn) Subscribe(channel ...interface{}) error {
+	c.Conn.Send("SUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// PSubscribe subscribes the connection to the given patterns.
+func (c PubSubConn) PSubscribe(channel ...interface{}) error {
+	c.Conn.Send("PSUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// Unsubscribe unsubscribes the connection from the given channels, or from all
+// of them if none is given.
+func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
+	c.Conn.Send("UNSUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// PUnsubscribe unsubscribes the connection from the given patterns, or from all
+// of them if none is given.
+func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
+	c.Conn.Send("PUNSUBSCRIBE", channel...)
+	return c.Conn.Flush()
+}
+
+// Receive returns a pushed message as a Subscription, Message, PMessage or
+// error. The return value is intended to be used directly in a type switch as
+// illustrated in the PubSubConn example.
+func (c PubSubConn) Receive() interface{} {
+	reply, err := Values(c.Conn.Receive())
+	if err != nil {
+		return err
+	}
+
+	var kind string
+	reply, err = Scan(reply, &kind)
+	if err != nil {
+		return err
+	}
+
+	switch kind {
+	case "message":
+		var m Message
+		if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
+			return err
+		}
+		return m
+	case "pmessage":
+		var pm PMessage
+		if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
+			return err
+		}
+		return pm
+	case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
+		s := Subscription{Kind: kind}
+		if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
+			return err
+		}
+		return s
+	}
+	return errors.New("redigo: unknown pubsub notification")
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go
new file mode 100644
index 000000000000..707f5a4706a7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go
@@ -0,0 +1,143 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis_test
+
+import (
+	"fmt"
+	"net"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/garyburd/redigo/internal/redistest"
+	"github.com/garyburd/redigo/redis"
+)
+
+func publish(channel, value interface{}) {
+	c, err := dial()
+	if err != nil {
+		panic(err)
+	}
+	defer c.Close()
+	c.Do("PUBLISH", channel, value)
+}
+
+// Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine.
+func ExamplePubSubConn() {
+	c, err := dial()
+	if err != nil {
+		panic(err)
+	}
+	defer c.Close()
+	var wg sync.WaitGroup
+	wg.Add(2)
+
+	psc := redis.PubSubConn{Conn: c}
+
+	// This goroutine receives and prints pushed notifications from the server.
+	// The goroutine exits when the connection is unsubscribed from all
+	// channels or there is an error.
+	go func() {
+		defer wg.Done()
+		for {
+			switch n := psc.Receive().(type) {
+			case redis.Message:
+				fmt.Printf("Message: %s %s\n", n.Channel, n.Data)
+			case redis.PMessage:
+				fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data)
+			case redis.Subscription:
+				fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count)
+				if n.Count == 0 {
+					return
+				}
+			case error:
+				fmt.Printf("error: %v\n", n)
+				return
+			}
+		}
+	}()
+
+	// This goroutine manages subscriptions for the connection.
+	go func() {
+		defer wg.Done()
+
+		psc.Subscribe("example")
+		psc.PSubscribe("p*")
+
+		// The following function calls publish a message using another
+		// connection to the Redis server.
+		publish("example", "hello")
+		publish("example", "world")
+		publish("pexample", "foo")
+		publish("pexample", "bar")
+
+		// Unsubscribe from all channels and patterns. This will cause the
+		// receiving goroutine to exit.
+		psc.Unsubscribe()
+		psc.PUnsubscribe()
+	}()
+
+	wg.Wait()
+
+	// Output:
+	// Subscription: subscribe example 1
+	// Subscription: psubscribe p* 2
+	// Message: example hello
+	// Message: example world
+	// PMessage: p* pexample foo
+	// PMessage: p* pexample bar
+	// Subscription: unsubscribe example 1
+	// Subscription: punsubscribe p* 0
+}
+
+func expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) {
+	actual := c.Receive()
+	if !reflect.DeepEqual(actual, expected) {
+		t.Errorf("%s = %v, want %v", message, actual, expected)
+	}
+}
+
+func TestPushed(t *testing.T) {
+	pc, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer pc.Close()
+
+	nc, err := net.Dial("tcp", ":6379")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer nc.Close()
+	nc.SetReadDeadline(time.Now().Add(4 * time.Second))
+
+	c := redis.PubSubConn{Conn: redis.NewConn(nc, 0, 0)}
+
+	c.Subscribe("c1")
+	expectPushed(t, c, "Subscribe(c1)", redis.Subscription{Kind: "subscribe", Channel: "c1", Count: 1})
+	c.Subscribe("c2")
+	expectPushed(t, c, "Subscribe(c2)", redis.Subscription{Kind: "subscribe", Channel: "c2", Count: 2})
+	c.PSubscribe("p1")
+	expectPushed(t, c, "PSubscribe(p1)", redis.Subscription{Kind: "psubscribe", Channel: "p1", Count: 3})
+	c.PSubscribe("p2")
+	expectPushed(t, c, "PSubscribe(p2)", redis.Subscription{Kind: "psubscribe", Channel: "p2", Count: 4})
+	c.PUnsubscribe()
+	expectPushed(t, c, "Punsubscribe(p1)", redis.Subscription{Kind: "punsubscribe", Channel: "p1", Count: 3})
+	expectPushed(t, c, "Punsubscribe()", redis.Subscription{Kind: "punsubscribe", Channel: "p2", Count: 2})
+
+	pc.Do("PUBLISH", "c1", "hello")
+	expectPushed(t, c, "PUBLISH c1 hello", redis.Message{Channel: "c1", Data: []byte("hello")})
+}
diff --git
a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go new file mode 100644 index 000000000000..c90a48ed44b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go @@ -0,0 +1,44 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +// Error represents an error returned in a command reply. +type Error string + +func (err Error) Error() string { return string(err) } + +// Conn represents a connection to a Redis server. +type Conn interface { + // Close closes the connection. + Close() error + + // Err returns a non-nil value if the connection is broken. The returned + // value is either the first non-nil value returned from the underlying + // network connection or a protocol parsing error. Applications should + // close broken connections. + Err() error + + // Do sends a command to the server and returns the received reply. + Do(commandName string, args ...interface{}) (reply interface{}, err error) + + // Send writes the command to the client's output buffer. + Send(commandName string, args ...interface{}) error + + // Flush flushes the output buffer to the Redis server. + Flush() error + + // Receive receives a single reply from the Redis server + Receive() (reply interface{}, err error) +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go new file mode 100644 index 000000000000..5648f930d672 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go @@ -0,0 +1,312 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "strconv" +) + +// ErrNil indicates that a reply value is nil. +var ErrNil = errors.New("redigo: nil returned") + +// Int is a helper that converts a command reply to an integer. If err is not +// equal to nil, then Int returns 0, err. 
Otherwise, Int converts the
+// reply to an int as follows:
+//
+//	Reply type    Result
+//	integer       int(reply), nil
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Int(reply interface{}, err error) (int, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		x := int(reply)
+		if int64(x) != reply {
+			return 0, strconv.ErrRange
+		}
+		return x, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 0)
+		return int(n), err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to 64 bit integer. If err is
+// not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the
+// reply to an int64 as follows:
+//
+//	Reply type    Result
+//	integer       reply, nil
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Int64(reply interface{}, err error) (int64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to a 64 bit unsigned
+// integer. If err is not equal to nil, then Uint64 returns 0, err. Otherwise,
+// Uint64 converts the reply to a uint64 as follows:
+//
+//	Reply type    Result
+//	integer       reply, nil
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		if reply < 0 {
+			return 0, errNegativeInt
+		}
+		return uint64(reply), nil
+	case []byte:
+		n, err := strconv.ParseUint(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to 64 bit float. If err is
+// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
+// the reply to a float64 as follows:
+//
+//	Reply type    Result
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Float64(reply interface{}, err error) (float64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		n, err := strconv.ParseFloat(string(reply), 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err.
Otherwise String converts the +// reply to a string as follows: +// +// Reply type Result +// bulk string string(reply), nil +// simple string reply, nil +// nil "", ErrNil +// other "", error +func String(reply interface{}, err error) (string, error) { + if err != nil { + return "", err + } + switch reply := reply.(type) { + case []byte: + return string(reply), nil + case string: + return reply, nil + case nil: + return "", ErrNil + case Error: + return "", reply + } + return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) +} + +// Bytes is a helper that converts a command reply to a slice of bytes. If err +// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts +// the reply to a slice of bytes as follows: +// +// Reply type Result +// bulk string reply, nil +// simple string []byte(reply), nil +// nil nil, ErrNil +// other nil, error +func Bytes(reply interface{}, err error) ([]byte, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []byte: + return reply, nil + case string: + return []byte(reply), nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) +} + +// Bool is a helper that converts a command reply to a boolean. If err is not +// equal to nil, then Bool returns false, err. Otherwise Bool converts the +// reply to boolean as follows: +// +// Reply type Result +// integer value != 0, nil +// bulk string strconv.ParseBool(reply) +// nil false, ErrNil +// other false, error +func Bool(reply interface{}, err error) (bool, error) { + if err != nil { + return false, err + } + switch reply := reply.(type) { + case int64: + return reply != 0, nil + case []byte: + return strconv.ParseBool(string(reply)) + case nil: + return false, ErrNil + case Error: + return false, reply + } + return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) +} + +// MultiBulk is deprecated. Use Values. +func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } + +// Values is a helper that converts an array command reply to a []interface{}. +// If err is not equal to nil, then Values returns nil, err. Otherwise, Values +// converts the reply as follows: +// +// Reply type Result +// array reply, nil +// nil nil, ErrNil +// other nil, error +func Values(reply interface{}, err error) ([]interface{}, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + return reply, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) +} + +// Strings is a helper that converts an array command reply to a []string. If +// err is not equal to nil, then Strings returns nil, err. Nil array items are +// converted to "" in the output slice. Strings returns an error if an array +// item is not a bulk string or nil. 
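+//
+// A short usage sketch (editorial addition, assuming a connected Conn c):
+//
+//	c.Do("RPUSH", "mylist", "a", "b", "c")
+//	items, err := Strings(c.Do("LRANGE", "mylist", 0, -1))
+//	// items == []string{"a", "b", "c"} when err == nil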
+func Strings(reply interface{}, err error) ([]string, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		result := make([]string, len(reply))
+		for i := range reply {
+			if reply[i] == nil {
+				continue
+			}
+			p, ok := reply[i].([]byte)
+			if !ok {
+				return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
+			}
+			result[i] = string(p)
+		}
+		return result, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
+}
+
+// Ints is a helper that converts an array command reply to a []int. If
+// err is not equal to nil, then Ints returns nil, err.
+func Ints(reply interface{}, err error) ([]int, error) {
+	var ints []int
+	if reply == nil {
+		return ints, ErrNil
+	}
+	values, err := Values(reply, err)
+	if err != nil {
+		return ints, err
+	}
+	if err := ScanSlice(values, &ints); err != nil {
+		return ints, err
+	}
+	return ints, nil
+}
+
+// StringMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
+// Requires an even number of values in result.
+func StringMap(result interface{}, err error) (map[string]string, error) {
+	values, err := Values(result, err)
+	if err != nil {
+		return nil, err
+	}
+	if len(values)%2 != 0 {
+		return nil, errors.New("redigo: StringMap expects an even number of values in the result")
+	}
+	m := make(map[string]string, len(values)/2)
+	for i := 0; i < len(values); i += 2 {
+		key, okKey := values[i].([]byte)
+		value, okValue := values[i+1].([]byte)
+		if !okKey || !okValue {
+			return nil, errors.New("redigo: StringMap key or value not a bulk string")
+		}
+		m[string(key)] = string(value)
+	}
+	return m, nil
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go
new file mode 100644
index 000000000000..92744c590b69
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go
@@ -0,0 +1,166 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
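+
+// Editorial sketch of the StringMap helper defined above (assumes a reachable
+// Redis server and a connected redis.Conn c):
+//
+//	c.Do("HMSET", "user:1", "name", "alice", "role", "admin")
+//	fields, err := redis.StringMap(c.Do("HGETALL", "user:1"))
+//	// fields == map[string]string{"name": "alice", "role": "admin"}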
+ +package redis_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +type valueError struct { + v interface{} + err error +} + +func ve(v interface{}, err error) valueError { + return valueError{v, err} +} + +var replyTests = []struct { + name interface{} + actual valueError + expected valueError +}{ + { + "ints([v1, v2])", + ve(redis.Ints([]interface{}{[]byte("4"), []byte("5")}, nil)), + ve([]int{4, 5}, nil), + }, + { + "ints(nil)", + ve(redis.Ints(nil, nil)), + ve([]int(nil), redis.ErrNil), + }, + { + "strings([v1, v2])", + ve(redis.Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)), + ve([]string{"v1", "v2"}, nil), + }, + { + "strings(nil)", + ve(redis.Strings(nil, nil)), + ve([]string(nil), redis.ErrNil), + }, + { + "values([v1, v2])", + ve(redis.Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)), + ve([]interface{}{[]byte("v1"), []byte("v2")}, nil), + }, + { + "values(nil)", + ve(redis.Values(nil, nil)), + ve([]interface{}(nil), redis.ErrNil), + }, + { + "float64(1.0)", + ve(redis.Float64([]byte("1.0"), nil)), + ve(float64(1.0), nil), + }, + { + "float64(nil)", + ve(redis.Float64(nil, nil)), + ve(float64(0.0), redis.ErrNil), + }, + { + "uint64(1)", + ve(redis.Uint64(int64(1), nil)), + ve(uint64(1), nil), + }, + { + "uint64(-1)", + ve(redis.Uint64(int64(-1), nil)), + ve(uint64(0), redis.ErrNegativeInt), + }, +} + +func TestReply(t *testing.T) { + for _, rt := range replyTests { + if rt.actual.err != rt.expected.err { + t.Errorf("%s returned err %v, want %v", rt.name, rt.actual.err, rt.expected.err) + continue + } + if !reflect.DeepEqual(rt.actual.v, rt.expected.v) { + t.Errorf("%s=%+v, want %+v", rt.name, rt.actual.v, rt.expected.v) + } + } +} + +// dial wraps DialTestDB() with a more suitable function name for examples. +func dial() (redis.Conn, error) { + return redistest.Dial() +} + +func ExampleBool() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "foo", 1) + exists, _ := redis.Bool(c.Do("EXISTS", "foo")) + fmt.Printf("%#v\n", exists) + // Output: + // true +} + +func ExampleInt() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "k1", 1) + n, _ := redis.Int(c.Do("GET", "k1")) + fmt.Printf("%#v\n", n) + n, _ = redis.Int(c.Do("INCR", "k1")) + fmt.Printf("%#v\n", n) + // Output: + // 1 + // 2 +} + +func ExampleInts() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SADD", "set_with_integers", 4, 5, 6) + ints, _ := redis.Ints(c.Do("SMEMBERS", "set_with_integers")) + fmt.Printf("%#v\n", ints) + // Output: + // []int{4, 5, 6} +} + +func ExampleString() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "hello", "world") + s, err := redis.String(c.Do("GET", "hello")) + fmt.Printf("%#v\n", s) + // Output: + // "world" +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go new file mode 100644 index 000000000000..8c9cfa18d479 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go @@ -0,0 +1,513 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + return fmt.Errorf("redigo: Scan cannot convert from %s to %s", + reflect.TypeOf(s), d.Type()) +} + +func convertAssignBytes(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + switch s := s.(type) { + case []byte: + err = convertAssignBytes(d, s) + case int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignValues(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. 
+	switch s := s.(type) {
+	case nil:
+		// ignore
+	case []byte:
+		switch d := d.(type) {
+		case *string:
+			*d = string(s)
+		case *int:
+			*d, err = strconv.Atoi(string(s))
+		case *bool:
+			*d, err = strconv.ParseBool(string(s))
+		case *[]byte:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignBytes(d.Elem(), s)
+			}
+		}
+	case int64:
+		switch d := d.(type) {
+		case *int:
+			x := int(s)
+			if int64(x) != s {
+				err = strconv.ErrRange
+				x = 0
+			}
+			*d = x
+		case *bool:
+			*d = s != 0
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignInt(d.Elem(), s)
+			}
+		}
+	case []interface{}:
+		switch d := d.(type) {
+		case *[]interface{}:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignValues(d.Elem(), s)
+			}
+		}
+	case Error:
+		err = s
+	default:
+		err = cannotConvert(reflect.ValueOf(d), s)
+	}
+	return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// The values pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or slices of these types. Scan uses the standard strconv
+// package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+	if len(src) < len(dest) {
+		return nil, errors.New("redigo: Scan array short")
+	}
+	var err error
+	for i, d := range dest {
+		err = convertAssign(d, src[i])
+		if err != nil {
+			break
+		}
+	}
+	return src[len(dest):], err
+}
+
+type fieldSpec struct {
+	name  string
+	index []int
+	//omitEmpty bool
+}
+
+type structSpec struct {
+	m map[string]*fieldSpec
+	l []*fieldSpec
+}
+
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+	return ss.m[string(name)]
+}
+
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		switch {
+		case f.PkgPath != "":
+			// Ignore unexported fields.
+		case f.Anonymous:
+			// TODO: Handle pointers. Requires change to decoder and
+			// protection against infinite recursion.
+			if f.Type.Kind() == reflect.Struct {
+				compileStructSpec(f.Type, depth, append(index, i), ss)
+			}
+		default:
+			fs := &fieldSpec{name: f.Name}
+			tag := f.Tag.Get("redis")
+			p := strings.Split(tag, ",")
+			if len(p) > 0 {
+				if p[0] == "-" {
+					continue
+				}
+				if len(p[0]) > 0 {
+					fs.name = p[0]
+				}
+				for _, s := range p[1:] {
+					switch s {
+					//case "omitempty":
+					//	fs.omitempty = true
+					default:
+						panic(errors.New("redigo: unknown field flag " + s + " for type " + t.Name()))
+					}
+				}
+			}
+			d, found := depth[fs.name]
+			if !found {
+				d = 1 << 30
+			}
+			switch {
+			case len(index) == d:
+				// At same depth, remove from result.
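+				// (editorial note) Two fields with the same name at the same
+				// embedding depth are ambiguous, mirroring Go's own rules for
+				// promoted fields, so the earlier field is dropped and neither
+				// is used: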
+				delete(ss.m, fs.name)
+				j := 0
+				for i := 0; i < len(ss.l); i++ {
+					if fs.name != ss.l[i].name {
+						ss.l[j] = ss.l[i]
+						j++
+					}
+				}
+				ss.l = ss.l[:j]
+			case len(index) < d:
+				fs.index = make([]int, len(index)+1)
+				copy(fs.index, index)
+				fs.index[len(index)] = i
+				depth[fs.name] = len(index)
+				ss.m[fs.name] = fs
+				ss.l = append(ss.l, fs)
+			}
+		}
+	}
+}
+
+var (
+	structSpecMutex  sync.RWMutex
+	structSpecCache  = make(map[reflect.Type]*structSpec)
+	defaultFieldSpec = &fieldSpec{}
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+
+	structSpecMutex.RLock()
+	ss, found := structSpecCache[t]
+	structSpecMutex.RUnlock()
+	if found {
+		return ss
+	}
+
+	structSpecMutex.Lock()
+	defer structSpecMutex.Unlock()
+	ss, found = structSpecCache[t]
+	if found {
+		return ss
+	}
+
+	ss = &structSpec{m: make(map[string]*fieldSpec)}
+	compileStructSpec(t, make(map[string]int), nil, ss)
+	structSpecCache[t] = ss
+	return ss
+}
+
+var errScanStructValue = errors.New("redigo: ScanStruct value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// the 'redis' field tag to override the name:
+//
+//	Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanStructValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Struct {
+		return errScanStructValue
+	}
+	ss := structSpecForType(d.Type())
+
+	if len(src)%2 != 0 {
+		return errors.New("redigo: ScanStruct expects an even number of values in src")
+	}
+
+	for i := 0; i < len(src); i += 2 {
+		s := src[i+1]
+		if s == nil {
+			continue
+		}
+		name, ok := src[i].([]byte)
+		if !ok {
+			return errors.New("redigo: ScanStruct key not a bulk string value")
+		}
+		fs := ss.fieldSpec(name)
+		if fs == nil {
+			continue
+		}
+		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+var (
+	errScanSliceValue = errors.New("redigo: ScanSlice dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements of the
+// dest slice must be integer, float, boolean, string, struct or pointer to
+// struct values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
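+//
+// Illustrative sketch (editorial addition): scanning the alternating
+// title/rating values of a SORT ... GET reply into a struct slice, as in
+// ExampleScanSlice later in this diff:
+//
+//	var albums []struct {
+//		Title  string
+//		Rating int
+//	}
+//	err := ScanSlice(values, &albums)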
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return err + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return errors.New("redigo: ScanSlice bad field name " + name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo: ScanSlice no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo: ScanSlice length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return err + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. 
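+//
+// Illustrative sketch (editorial addition), assuming a hypothetical User
+// struct with untagged Name and Age fields:
+//
+//	args := Args{}.Add("user:1").AddFlat(&User{Name: "alice", Age: 30})
+//	// args == Args{"user:1", "Name", "alice", "Age", 30}
+//	c.Do("HMSET", args...)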
+func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go new file mode 100644 index 000000000000..b57dd89695e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go @@ -0,0 +1,412 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "github.com/garyburd/redigo/redis" + "math" + "reflect" + "testing" +) + +var scanConversionTests = []struct { + src interface{} + dest interface{} +}{ + {[]byte("-inf"), math.Inf(-1)}, + {[]byte("+inf"), math.Inf(1)}, + {[]byte("0"), float64(0)}, + {[]byte("3.14159"), float64(3.14159)}, + {[]byte("3.14"), float32(3.14)}, + {[]byte("-100"), int(-100)}, + {[]byte("101"), int(101)}, + {int64(102), int(102)}, + {[]byte("103"), uint(103)}, + {int64(104), uint(104)}, + {[]byte("105"), int8(105)}, + {int64(106), int8(106)}, + {[]byte("107"), uint8(107)}, + {int64(108), uint8(108)}, + {[]byte("0"), false}, + {int64(0), false}, + {[]byte("f"), false}, + {[]byte("1"), true}, + {int64(1), true}, + {[]byte("t"), true}, + {[]byte("hello"), "hello"}, + {[]byte("world"), []byte("world")}, + {[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}}, + {[]interface{}{[]byte("foo")}, []string{"foo"}}, + {[]interface{}{[]byte("hello"), []byte("world")}, []string{"hello", "world"}}, + {[]interface{}{[]byte("bar")}, [][]byte{[]byte("bar")}}, + {[]interface{}{[]byte("1")}, []int{1}}, + {[]interface{}{[]byte("1"), []byte("2")}, []int{1, 2}}, + {[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}}, + {[]interface{}{[]byte("1")}, []byte{1}}, + {[]interface{}{[]byte("1")}, []bool{true}}, +} + +func TestScanConversion(t *testing.T) { + for _, tt := range scanConversionTests { + values := []interface{}{tt.src} + dest := reflect.New(reflect.TypeOf(tt.dest)) + values, err := redis.Scan(values, dest.Interface()) + if err != nil { + t.Errorf("Scan(%v) returned error %v", tt, err) + continue + } + if !reflect.DeepEqual(tt.dest, dest.Elem().Interface()) { + t.Errorf("Scan(%v) returned %v, want %v", tt, dest.Elem().Interface(), tt.dest) + } + } +} + +var 
scanConversionErrorTests = []struct { + src interface{} + dest interface{} +}{ + {[]byte("1234"), byte(0)}, + {int64(1234), byte(0)}, + {[]byte("-1"), byte(0)}, + {int64(-1), byte(0)}, + {[]byte("junk"), false}, + {redis.Error("blah"), false}, +} + +func TestScanConversionError(t *testing.T) { + for _, tt := range scanConversionErrorTests { + values := []interface{}{tt.src} + dest := reflect.New(reflect.TypeOf(tt.dest)) + values, err := redis.Scan(values, dest.Interface()) + if err == nil { + t.Errorf("Scan(%v) did not return error", tt) + } + } +} + +func ExampleScan() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Send("HMSET", "album:1", "title", "Red", "rating", 5) + c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) + c.Send("HMSET", "album:3", "title", "Beat") + c.Send("LPUSH", "albums", "1") + c.Send("LPUSH", "albums", "2") + c.Send("LPUSH", "albums", "3") + values, err := redis.Values(c.Do("SORT", "albums", + "BY", "album:*->rating", + "GET", "album:*->title", + "GET", "album:*->rating")) + if err != nil { + panic(err) + } + + for len(values) > 0 { + var title string + rating := -1 // initialize to illegal value to detect nil. + values, err = redis.Scan(values, &title, &rating) + if err != nil { + panic(err) + } + if rating == -1 { + fmt.Println(title, "not-rated") + } else { + fmt.Println(title, rating) + } + } + // Output: + // Beat not-rated + // Earthbound 1 + // Red 5 +} + +type s0 struct { + X int + Y int `redis:"y"` + Bt bool +} + +type s1 struct { + X int `redis:"-"` + I int `redis:"i"` + U uint `redis:"u"` + S string `redis:"s"` + P []byte `redis:"p"` + B bool `redis:"b"` + Bt bool + Bf bool + s0 +} + +var scanStructTests = []struct { + title string + reply []string + value interface{} +}{ + {"basic", + []string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"}, + &s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}}, + }, +} + +func TestScanStruct(t *testing.T) { + for _, tt := range scanStructTests { + + var reply []interface{} + for _, v := range tt.reply { + reply = append(reply, []byte(v)) + } + + value := reflect.New(reflect.ValueOf(tt.value).Type().Elem()) + + if err := redis.ScanStruct(reply, value.Interface()); err != nil { + t.Fatalf("ScanStruct(%s) returned error %v", tt.title, err) + } + + if !reflect.DeepEqual(value.Interface(), tt.value) { + t.Fatalf("ScanStruct(%s) returned %v, want %v", tt.title, value.Interface(), tt.value) + } + } +} + +func TestBadScanStructArgs(t *testing.T) { + x := []interface{}{"A", "b"} + test := func(v interface{}) { + if err := redis.ScanStruct(x, v); err == nil { + t.Errorf("Expect error for ScanStruct(%T, %T)", x, v) + } + } + + test(nil) + + var v0 *struct{} + test(v0) + + var v1 int + test(&v1) + + x = x[:1] + v2 := struct{ A string }{} + test(&v2) +} + +var scanSliceTests = []struct { + src []interface{} + fieldNames []string + ok bool + dest interface{} +}{ + { + []interface{}{[]byte("1"), nil, []byte("-1")}, + nil, + true, + []int{1, 0, -1}, + }, + { + []interface{}{[]byte("1"), nil, []byte("2")}, + nil, + true, + []uint{1, 0, 2}, + }, + { + []interface{}{[]byte("-1")}, + nil, + false, + []uint{1}, + }, + { + []interface{}{[]byte("hello"), nil, []byte("world")}, + nil, + true, + [][]byte{[]byte("hello"), nil, []byte("world")}, + }, + { + []interface{}{[]byte("hello"), nil, []byte("world")}, + nil, + true, + []string{"hello", "", "world"}, + }, + { + 
[]interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + true, + []struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1")}, + nil, + false, + []struct{ A, B, C string }{{"a1", "b1", ""}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + true, + []*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + []string{"A", "B"}, + true, + []struct{ A, C, B string }{{"a1", "", "b1"}, {"a2", "", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + false, + []struct{}{}, + }, +} + +func TestScanSlice(t *testing.T) { + for _, tt := range scanSliceTests { + + typ := reflect.ValueOf(tt.dest).Type() + dest := reflect.New(typ) + + err := redis.ScanSlice(tt.src, dest.Interface(), tt.fieldNames...) + if tt.ok != (err == nil) { + t.Errorf("ScanSlice(%v, []%s, %v) returned error %v", tt.src, typ, tt.fieldNames, err) + continue + } + if tt.ok && !reflect.DeepEqual(dest.Elem().Interface(), tt.dest) { + t.Errorf("ScanSlice(src, []%s) returned %#v, want %#v", typ, dest.Elem().Interface(), tt.dest) + } + } +} + +func ExampleScanSlice() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Send("HMSET", "album:1", "title", "Red", "rating", 5) + c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) + c.Send("HMSET", "album:3", "title", "Beat", "rating", 4) + c.Send("LPUSH", "albums", "1") + c.Send("LPUSH", "albums", "2") + c.Send("LPUSH", "albums", "3") + values, err := redis.Values(c.Do("SORT", "albums", + "BY", "album:*->rating", + "GET", "album:*->title", + "GET", "album:*->rating")) + if err != nil { + panic(err) + } + + var albums []struct { + Title string + Rating int + } + if err := redis.ScanSlice(values, &albums); err != nil { + panic(err) + } + fmt.Printf("%v\n", albums) + // Output: + // [{Earthbound 1} {Beat 4} {Red 5}] +} + +var argsTests = []struct { + title string + actual redis.Args + expected redis.Args +}{ + {"struct ptr", + redis.Args{}.AddFlat(&struct { + I int `redis:"i"` + U uint `redis:"u"` + S string `redis:"s"` + P []byte `redis:"p"` + Bt bool + Bf bool + }{ + -1234, 5678, "hello", []byte("world"), true, false, + }), + redis.Args{"i", int(-1234), "u", uint(5678), "s", "hello", "p", []byte("world"), "Bt", true, "Bf", false}, + }, + {"struct", + redis.Args{}.AddFlat(struct{ I int }{123}), + redis.Args{"I", 123}, + }, + {"slice", + redis.Args{}.Add(1).AddFlat([]string{"a", "b", "c"}).Add(2), + redis.Args{1, "a", "b", "c", 2}, + }, +} + +func TestArgs(t *testing.T) { + for _, tt := range argsTests { + if !reflect.DeepEqual(tt.actual, tt.expected) { + t.Fatalf("%s is %v, want %v", tt.title, tt.actual, tt.expected) + } + } +} + +func ExampleArgs() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + var p1, p2 struct { + Title string `redis:"title"` + Author string `redis:"author"` + Body string `redis:"body"` + } + + p1.Title = "Example" + p1.Author = "Gary" + p1.Body = "Hello" + + if _, err := c.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil { + panic(err) + } + + m := map[string]string{ + "title": "Example2", + "author": "Steve", + "body": "Map", + } + + if _, err := c.Do("HMSET", redis.Args{}.Add("id2").AddFlat(m)...); err != nil { + panic(err) + } + + for _, id := range []string{"id1", "id2"} { + + v, err := redis.Values(c.Do("HGETALL", id)) + if err != nil { + panic(err) + } + + 
if err := redis.ScanStruct(v, &p2); err != nil { + panic(err) + } + + fmt.Printf("%+v\n", p2) + } + + // Output: + // {Title:Example Author:Gary Body:Hello} + // {Title:Example2 Author:Steve Body:Map} +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go new file mode 100644 index 000000000000..78605a90a83f --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go @@ -0,0 +1,86 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. +func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Do evaluates the script. Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. 
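+//
+// A typical pattern (editorial sketch) is to Load once at startup so that
+// subsequent SendHash calls cannot fail with NOSCRIPT:
+//
+//	if err := script.Load(c); err != nil {
+//		// handle the connection or scripting error
+//	}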
+func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go new file mode 100644 index 000000000000..c9635bf08e3d --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go @@ -0,0 +1,93 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +func ExampleScript(c redis.Conn, reply interface{}, err error) { + // Initialize a package-level variable with a script. + var getScript = redis.NewScript(1, `return redis.call('get', KEYS[1])`) + + // In a function, use the script Do method to evaluate the script. The Do + // method optimistically uses the EVALSHA command. If the script is not + // loaded, then the Do method falls back to the EVAL command. + reply, err = getScript.Do(c, "foo") +} + +func TestScript(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + // To test fall back in Do, we make script unique by adding comment with current time. + script := fmt.Sprintf("--%d\nreturn {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", time.Now().UnixNano()) + s := redis.NewScript(2, script) + reply := []interface{}{[]byte("key1"), []byte("key2"), []byte("arg1"), []byte("arg2")} + + v, err := s.Do(c, "key1", "key2", "arg1", "arg2") + if err != nil { + t.Errorf("s.Do(c, ...) returned %v", err) + } + + if !reflect.DeepEqual(v, reply) { + t.Errorf("s.Do(c, ..); = %v, want %v", v, reply) + } + + err = s.Load(c) + if err != nil { + t.Errorf("s.Load(c) returned %v", err) + } + + err = s.SendHash(c, "key1", "key2", "arg1", "arg2") + if err != nil { + t.Errorf("s.SendHash(c, ...) returned %v", err) + } + + err = c.Flush() + if err != nil { + t.Errorf("c.Flush() returned %v", err) + } + + v, err = c.Receive() + if !reflect.DeepEqual(v, reply) { + t.Errorf("s.SendHash(c, ..); c.Receive() = %v, want %v", v, reply) + } + + err = s.Send(c, "key1", "key2", "arg1", "arg2") + if err != nil { + t.Errorf("s.Send(c, ...) 
returned %v", err) + } + + err = c.Flush() + if err != nil { + t.Errorf("c.Flush() returned %v", err) + } + + v, err = c.Receive() + if !reflect.DeepEqual(v, reply) { + t.Errorf("s.Send(c, ..); c.Receive() = %v, want %v", v, reply) + } + +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go new file mode 100644 index 000000000000..b959a11f4f77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go @@ -0,0 +1,38 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "net" + "time" +) + +func SetNowFunc(f func() time.Time) { + nowFunc = f +} + +type nopCloser struct{ net.Conn } + +func (nopCloser) Close() error { return nil } + +// NewConnBufio is a hook for tests. +func NewConnBufio(rw bufio.ReadWriter) Conn { + return &conn{br: rw.Reader, bw: rw.Writer, conn: nopCloser{}} +} + +var ( + ErrNegativeInt = errNegativeInt +) diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go new file mode 100644 index 000000000000..1d86ee6ce8c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go @@ -0,0 +1,113 @@ +// Copyright 2013 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "github.com/garyburd/redigo/redis" +) + +// zpop pops a value from the ZSET key using WATCH/MULTI/EXEC commands. +func zpop(c redis.Conn, key string) (result string, err error) { + + defer func() { + // Return connection to normal state on error. + if err != nil { + c.Do("DISCARD") + } + }() + + // Loop until transaction is successful. + for { + if _, err := c.Do("WATCH", key); err != nil { + return "", err + } + + members, err := redis.Strings(c.Do("ZRANGE", key, 0, 0)) + if err != nil { + return "", err + } + if len(members) != 1 { + return "", redis.ErrNil + } + + c.Send("MULTI") + c.Send("ZREM", key, members[0]) + queued, err := c.Do("EXEC") + if err != nil { + return "", err + } + + if queued != nil { + result = members[0] + break + } + } + + return result, nil +} + +// zpopScript pops a value from a ZSET. 
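+// Because Redis executes scripts atomically, this variant needs no
+// WATCH/MULTI/EXEC retry loop (editorial note).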
+var zpopScript = redis.NewScript(1, ` + local r = redis.call('ZRANGE', KEYS[1], 0, 0) + if r ~= nil then + r = r[1] + redis.call('ZREM', KEYS[1], r) + end + return r +`) + +// This example implements ZPOP as described at +// http://redis.io/topics/transactions using WATCH/MULTI/EXEC and scripting. +func Example_zpop() { + c, err := dial() + if err != nil { + fmt.Println(err) + return + } + defer c.Close() + + // Add test data using a pipeline. + + for i, member := range []string{"red", "blue", "green"} { + c.Send("ZADD", "zset", i, member) + } + if _, err := c.Do(""); err != nil { + fmt.Println(err) + return + } + + // Pop using WATCH/MULTI/EXEC + + v, err := zpop(c, "zset") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(v) + + // Pop using a script. + + v, err = redis.String(zpopScript.Do(c, "zset")) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(v) + + // Output: + // red + // blue +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/.gitignore b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/LICENSE b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/LICENSE new file mode 100644 index 000000000000..2815cc36c9d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/README.md b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/README.md new file mode 100644 index 000000000000..8466f55e3d78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/README.md @@ -0,0 +1,6 @@ +# go-crypto +A Subset of the Go `crypto` Package with a Resumable Hash Interface + +### Documentation + +GoDocs: http://godoc.org/github.com/jlhawn/go-crypto diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/crypto.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/crypto.go new file mode 100644 index 000000000000..cc684dee1db8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/crypto.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crypto is a Subset of the Go `crypto` Package with a Resumable Hash +package crypto + +import ( + "hash" + "strconv" +) + +// Hash identifies a cryptographic hash function that is implemented in another +// package. +type Hash uint + +// HashFunc simply returns the value of h so that Hash implements SignerOpts. +func (h Hash) HashFunc() Hash { + return h +} + +const ( + SHA224 Hash = 1 + iota // import crypto/sha256 + SHA256 // import crypto/sha256 + SHA384 // import crypto/sha512 + SHA512 // import crypto/sha512 + maxHash +) + +var digestSizes = []uint8{ + SHA224: 28, + SHA256: 32, + SHA384: 48, + SHA512: 64, +} + +// Size returns the length, in bytes, of a digest resulting from the given hash +// function. It doesn't require that the hash function in question be linked +// into the program. +func (h Hash) Size() int { + if h > 0 && h < maxHash { + return int(digestSizes[h]) + } + panic("crypto: Size of unknown hash function") +} + +// ResumableHash is the common interface implemented by all resumable hash +// functions. +type ResumableHash interface { + // ResumableHash is a superset of hash.Hash + hash.Hash + // Len returns the number of bytes written to the Hash so far. + Len() uint64 + // State returns a snapshot of the state of the Hash. + State() ([]byte, error) + // Restore resets the Hash to the given state. + Restore(state []byte) error +} + +var hashes = make([]func() ResumableHash, maxHash) + +// New returns a new ResumableHash calculating the given hash function. New panics +// if the hash function is not linked into the binary. +func (h Hash) New() ResumableHash { + if h > 0 && h < maxHash { + f := hashes[h] + if f != nil { + return f() + } + } + panic("crypto: requested hash function #" + strconv.Itoa(int(h)) + " is unavailable") +} + +// Available reports whether the given hash function is linked into the binary. +func (h Hash) Available() bool { + return h < maxHash && hashes[h] != nil +} + +// RegisterHash registers a function that returns a new instance of the given +// hash function. This is intended to be called from the init function in +// packages that implement hash functions. 
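+//
+// For example, the sha256 subpackage in this diff registers itself from its
+// init function:
+//
+//	func init() {
+//		crypto.RegisterHash(crypto.SHA224, New224)
+//		crypto.RegisterHash(crypto.SHA256, New)
+//	}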
+func RegisterHash(h Hash, f func() ResumableHash) { + if h >= maxHash { + panic("crypto: RegisterHash of unknown hash function") + } + hashes[h] = f +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/resume.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/resume.go new file mode 100644 index 000000000000..b31107ec5016 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/resume.go @@ -0,0 +1,50 @@ +package sha256 + +import ( + "bytes" + "encoding/gob" +) + +// Len returns the number of bytes which have been written to the digest. +func (d *digest) Len() uint64 { + return d.len +} + +// State returns a snapshot of the state of the digest. +func (d *digest) State() ([]byte, error) { + var buf bytes.Buffer + encoder := gob.NewEncoder(&buf) + + // We encode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + d.h, d.x, d.nx, d.len, d.is224, + } + + for _, val := range vals { + if err := encoder.Encode(val); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +// Restore resets the digest to the given state. +func (d *digest) Restore(state []byte) error { + decoder := gob.NewDecoder(bytes.NewReader(state)) + + // We decode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + &d.h, &d.x, &d.nx, &d.len, &d.is224, + } + + for _, val := range vals { + if err := decoder.Decode(val); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256.go new file mode 100644 index 000000000000..8a97a2f3ed22 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256.go @@ -0,0 +1,192 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined +// in FIPS 180-4. +package sha256 + +import ( + "github.com/jlhawn/go-crypto" +) + +func init() { + crypto.RegisterHash(crypto.SHA224, New224) + crypto.RegisterHash(crypto.SHA256, New) +} + +// The size of a SHA256 checksum in bytes. +const Size = 32 + +// The size of a SHA224 checksum in bytes. +const Size224 = 28 + +// The blocksize of SHA256 and SHA224 in bytes. +const BlockSize = 64 + +const ( + chunk = 64 + init0 = 0x6A09E667 + init1 = 0xBB67AE85 + init2 = 0x3C6EF372 + init3 = 0xA54FF53A + init4 = 0x510E527F + init5 = 0x9B05688C + init6 = 0x1F83D9AB + init7 = 0x5BE0CD19 + init0_224 = 0xC1059ED8 + init1_224 = 0x367CD507 + init2_224 = 0x3070DD17 + init3_224 = 0xF70E5939 + init4_224 = 0xFFC00B31 + init5_224 = 0x68581511 + init6_224 = 0x64F98FA7 + init7_224 = 0xBEFA4FA4 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + h [8]uint32 + x [chunk]byte + nx int + len uint64 + is224 bool // mark if this digest is SHA-224 +} + +func (d *digest) Reset() { + if !d.is224 { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + } else { + d.h[0] = init0_224 + d.h[1] = init1_224 + d.h[2] = init2_224 + d.h[3] = init3_224 + d.h[4] = init4_224 + d.h[5] = init5_224 + d.h[6] = init6_224 + d.h[7] = init7_224 + } + d.nx = 0 + d.len = 0 +} + +// New returns a new crypto.ResumableHash computing the SHA256 checksum. 
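+//
+// Editorial sketch of the State/Restore round trip this fork adds on top of
+// the standard library digest (firstChunk and secondChunk are placeholder
+// byte slices):
+//
+//	h := New()
+//	h.Write(firstChunk)
+//	state, _ := h.State()
+//
+//	h2 := New()
+//	h2.Restore(state)
+//	h2.Write(secondChunk)
+//	sum := h2.Sum(nil) // equals hashing both chunks in a single pass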
+func New() crypto.ResumableHash { + d := new(digest) + d.Reset() + return d +} + +// New224 returns a new crypto.ResumableHash computing the SHA224 checksum. +func New224() crypto.ResumableHash { + d := new(digest) + d.is224 = true + d.Reset() + return d +} + +func (d *digest) Size() int { + if !d.is224 { + return Size + } + return Size224 +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + hash := d.checkSum() + if d.is224 { + return append(in, hash[:Size224]...) + } + return append(in, hash[:]...) +} + +func (d *digest) checkSum() [Size]byte { + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (56 - 8*i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + h := d.h[:] + if d.is224 { + h = d.h[:7] + } + + var digest [Size]byte + for i, s := range h { + digest[i*4] = byte(s >> 24) + digest[i*4+1] = byte(s >> 16) + digest[i*4+2] = byte(s >> 8) + digest[i*4+3] = byte(s) + } + + return digest +} + +// Sum256 returns the SHA256 checksum of the data. +func Sum256(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + +// Sum224 returns the SHA224 checksum of the data. +func Sum224(data []byte) (sum224 [Size224]byte) { + var d digest + d.is224 = true + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum224[:], sum[:Size224]) + return +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256_test.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256_test.go new file mode 100644 index 000000000000..1d883d390595 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256_test.go @@ -0,0 +1,176 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// SHA256 hash algorithm. See FIPS 180-2. 
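+
+// Editorial note: each golden vector below is the hex form of the one-shot
+// helpers, e.g. for the "abc" entry:
+//
+//	fmt.Printf("%x\n", Sum256([]byte("abc")))
+//	// ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad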
+ +package sha256 + +import ( + "fmt" + "io" + "testing" +) + +type sha256Test struct { + out string + in string +} + +var golden = []sha256Test{ + {"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", ""}, + {"ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb", "a"}, + {"fb8e20fc2e4c3f248c60c39bd652f3c1347298bb977b8b4d5903b85055620603", "ab"}, + {"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc"}, + {"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", "abcd"}, + {"36bbe50ed96841d10443bcb670d6554f0a34b761be67ec9c4a8ad2c0c44ca42c", "abcde"}, + {"bef57ec7f53a6d40beb640a780a639c83bc29ac8a9816f1fc6c5c6dcd93c4721", "abcdef"}, + {"7d1a54127b222502f5b79b5fb0803061152a44f92b37e23c6527baf665d4da9a", "abcdefg"}, + {"9c56cc51b374c3ba189210d5b6d4bf57790d351c96c47c02190ecf1e430635ab", "abcdefgh"}, + {"19cc02f26df43cc571bc9ed7b0c4d29224a3ec229529221725ef76d021c8326f", "abcdefghi"}, + {"72399361da6a7754fec986dca5b7cbaf1c810a28ded4abaf56b2106d06cb78b0", "abcdefghij"}, + {"a144061c271f152da4d151034508fed1c138b8c976339de229c3bb6d4bbb4fce", "Discard medicine more than two years old."}, + {"6dae5caa713a10ad04b46028bf6dad68837c581616a1589a265a11288d4bb5c4", "He who has a shady past knows that nice guys finish last."}, + {"ae7a702a9509039ddbf29f0765e70d0001177914b86459284dab8b348c2dce3f", "I wouldn't marry him with a ten foot pole."}, + {"6748450b01c568586715291dfa3ee018da07d36bb7ea6f180c1af6270215c64f", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"14b82014ad2b11f661b5ae6a99b75105c2ffac278cd071cd6c05832793635774", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"7102cfd76e2e324889eece5d6c41921b1e142a4ac5a2692be78803097f6a48d8", "Nepal premier won't resign."}, + {"23b1018cd81db1d67983c5f7417c44da9deb582459e378d7a068552ea649dc9f", "For every action there is an equal and opposite government program."}, + {"8001f190dfb527261c4cfcab70c98e8097a7a1922129bc4096950e57c7999a5a", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"8c87deb65505c3993eb24b7a150c4155e82eee6960cf0c3a8114ff736d69cad5", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"bfb0a67a19cdec3646498b2e0f751bddc41bba4b7f30081b0b932aad214d16d7", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"7f9a0b9bf56332e19f5a0ec1ad9c1425a153da1c624868fda44561d6b74daf36", "size: a.out: bad magic"}, + {"b13f81b8aad9e3666879af19886140904f7f429ef083286195982a7588858cfc", "The major problem is with sendmail. -Mark Horton"}, + {"b26c38d61519e894480c70c8374ea35aa0ad05b2ae3d6674eec5f52a69305ed4", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"049d5e26d4f10222cd841a119e38bd8d2e0d1129728688449575d4ff42b842c1", "If the enemy is within range, then so are you."}, + {"0e116838e3cc1c1a14cd045397e29b4d087aa11b0853fc69ec82e90330d60949", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"4f7d8eb5bcf11de2a56b971021a444aa4eafd6ecd0f307b5109e4e776cd0fe46", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"61c0cc4c4bd8406d5120b3fb4ebc31ce87667c162f29468b3c779675a85aebce", "C is as portable as Stonehedge!!"}, + {"1fb2eb3688093c4a3f80cd87a5547e2ce940a4f923243a79a2a1e242220693ac", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. 
Huxley"}, + {"395585ce30617b62c80b93e8208ce866d4edc811a177fdb4b82d3911d8696423", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"4f9b189a13d030838269dce846b16a1ce9ce81fe63e65de2f636863336a98fe6", "How can you write a big system without C++? -Paul Glick"}, +} + +var golden224 = []sha256Test{ + {"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", ""}, + {"abd37534c7d9a2efb9465de931cd7055ffdb8879563ae98078d6d6d5", "a"}, + {"db3cda86d4429a1d39c148989566b38f7bda0156296bd364ba2f878b", "ab"}, + {"23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", "abc"}, + {"a76654d8e3550e9a2d67a0eeb6c67b220e5885eddd3fde135806e601", "abcd"}, + {"bdd03d560993e675516ba5a50638b6531ac2ac3d5847c61916cfced6", "abcde"}, + {"7043631cb415556a275a4ebecb802c74ee9f6153908e1792a90b6a98", "abcdef"}, + {"d1884e711701ad81abe0c77a3b0ea12e19ba9af64077286c72fc602d", "abcdefg"}, + {"17eb7d40f0356f8598e89eafad5f6c759b1f822975d9c9b737c8a517", "abcdefgh"}, + {"aeb35915346c584db820d2de7af3929ffafef9222a9bcb26516c7334", "abcdefghi"}, + {"d35e1e5af29ddb0d7e154357df4ad9842afee527c689ee547f753188", "abcdefghij"}, + {"19297f1cef7ddc8a7e947f5c5a341e10f7245045e425db67043988d7", "Discard medicine more than two years old."}, + {"0f10c2eb436251f777fbbd125e260d36aecf180411726c7c885f599a", "He who has a shady past knows that nice guys finish last."}, + {"4d1842104919f314cad8a3cd20b3cba7e8ed3e7abed62b57441358f6", "I wouldn't marry him with a ten foot pole."}, + {"a8ba85c6fe0c48fbffc72bbb2f03fcdbc87ae2dc7a56804d1590fb3b", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"5543fbab26e67e8885b1a852d567d1cb8b9bfe42e0899584c50449a9", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"65ca107390f5da9efa05d28e57b221657edc7e43a9a18fb15b053ddb", "Nepal premier won't resign."}, + {"84953962be366305a9cc9b5cd16ed019edc37ac96c0deb3e12cca116", "For every action there is an equal and opposite government program."}, + {"35a189ce987151dfd00b3577583cc6a74b9869eecf894459cb52038d", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"2fc333713983edfd4ef2c0da6fb6d6415afb94987c91e4069eb063e6", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"cbe32d38d577a1b355960a4bc3c659c2dc4670859a19777a875842c4", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"a2dc118ce959e027576413a7b440c875cdc8d40df9141d6ef78a57e1", "size: a.out: bad magic"}, + {"d10787e24052bcff26dc484787a54ed819e4e4511c54890ee977bf81", "The major problem is with sendmail. -Mark Horton"}, + {"62efcf16ab8a893acdf2f348aaf06b63039ff1bf55508c830532c9fb", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"3e9b7e4613c59f58665104c5fa86c272db5d3a2ff30df5bb194a5c99", "If the enemy is within range, then so are you."}, + {"5999c208b8bdf6d471bb7c359ac5b829e73a8211dff686143a4e7f18", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"3b2d67ff54eabc4ef737b14edf87c64280ef582bcdf2a6d56908b405", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"d0733595d20e4d3d6b5c565a445814d1bbb2fd08b9a3b8ffb97930c6", "C is as portable as Stonehedge!!"}, + {"43fb8aeed8a833175c9295c1165415f98c866ef08a4922959d673507", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. 
Huxley"}, + {"ec18e66e93afc4fb1604bc2baedbfd20b44c43d76e65c0996d7851c6", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"86ed2eaa9c75ba98396e5c9fb2f679ecf0ea2ed1e0ee9ceecb4a9332", "How can you write a big system without C++? -Paul Glick"}, +} + +func TestGolden(t *testing.T) { + for i := 0; i < len(golden); i++ { + g := golden[i] + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", g.in, s, g.out) + } + c := New() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha256[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } + for i := 0; i < len(golden224); i++ { + g := golden224[i] + s := fmt.Sprintf("%x", Sum224([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum224 function: sha224(%s) = %s want %s", g.in, s, g.out) + } + c := New224() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha224[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } +} + +func TestSize(t *testing.T) { + c := New() + if got := c.Size(); got != Size { + t.Errorf("Size = %d; want %d", got, Size) + } + c = New224() + if got := c.Size(); got != Size224 { + t.Errorf("New224.Size = %d; want %d", got, Size224) + } +} + +func TestBlockSize(t *testing.T) { + c := New() + if got := c.BlockSize(); got != BlockSize { + t.Errorf("BlockSize = %d want %d", got, BlockSize) + } +} + +var bench = New() +var buf = make([]byte, 8192) + +func benchmarkSize(b *testing.B, size int) { + b.SetBytes(int64(size)) + sum := make([]byte, bench.Size()) + for i := 0; i < b.N; i++ { + bench.Reset() + bench.Write(buf[:size]) + bench.Sum(sum[:0]) + } +} + +func BenchmarkHash8Bytes(b *testing.B) { + benchmarkSize(b, 8) +} + +func BenchmarkHash1K(b *testing.B) { + benchmarkSize(b, 1024) +} + +func BenchmarkHash8K(b *testing.B) { + benchmarkSize(b, 8192) +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block.go new file mode 100644 index 000000000000..ca5efd156a9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block.go @@ -0,0 +1,128 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !386,!amd64 + +// SHA256 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. 
+ +package sha256 + +var _K = []uint32{ + 0x428a2f98, + 0x71374491, + 0xb5c0fbcf, + 0xe9b5dba5, + 0x3956c25b, + 0x59f111f1, + 0x923f82a4, + 0xab1c5ed5, + 0xd807aa98, + 0x12835b01, + 0x243185be, + 0x550c7dc3, + 0x72be5d74, + 0x80deb1fe, + 0x9bdc06a7, + 0xc19bf174, + 0xe49b69c1, + 0xefbe4786, + 0x0fc19dc6, + 0x240ca1cc, + 0x2de92c6f, + 0x4a7484aa, + 0x5cb0a9dc, + 0x76f988da, + 0x983e5152, + 0xa831c66d, + 0xb00327c8, + 0xbf597fc7, + 0xc6e00bf3, + 0xd5a79147, + 0x06ca6351, + 0x14292967, + 0x27b70a85, + 0x2e1b2138, + 0x4d2c6dfc, + 0x53380d13, + 0x650a7354, + 0x766a0abb, + 0x81c2c92e, + 0x92722c85, + 0xa2bfe8a1, + 0xa81a664b, + 0xc24b8b70, + 0xc76c51a3, + 0xd192e819, + 0xd6990624, + 0xf40e3585, + 0x106aa070, + 0x19a4c116, + 0x1e376c08, + 0x2748774c, + 0x34b0bcb5, + 0x391c0cb3, + 0x4ed8aa4a, + 0x5b9cca4f, + 0x682e6ff3, + 0x748f82ee, + 0x78a5636f, + 0x84c87814, + 0x8cc70208, + 0x90befffa, + 0xa4506ceb, + 0xbef9a3f7, + 0xc67178f2, +} + +func block(dig *digest, p []byte) { + var w [64]uint32 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. + for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + for i := 16; i < 64; i++ { + v1 := w[i-2] + t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) + v2 := w[i-15] + t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 64; i++ { + t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_386.s b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_386.s new file mode 100644 index 000000000000..73ae2bf300eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_386.s @@ -0,0 +1,283 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// SHA256 block routine. See sha256block.go for Go equivalent. 
+// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 63 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVL (index*4)(SI), AX; \ + BSWAPL AX; \ + MOVL AX, (index*4)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) +// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) +#define MSGSCHEDULE1(index) \ + MOVL ((index-2)*4)(BP), AX; \ + MOVL AX, CX; \ + RORL $17, AX; \ + MOVL CX, DX; \ + RORL $19, CX; \ + SHRL $10, DX; \ + MOVL ((index-15)*4)(BP), BX; \ + XORL CX, AX; \ + MOVL BX, CX; \ + XORL DX, AX; \ + RORL $7, BX; \ + MOVL CX, DX; \ + SHRL $3, DX; \ + RORL $18, CX; \ + ADDL ((index-7)*4)(BP), AX; \ + XORL CX, BX; \ + XORL DX, BX; \ + ADDL ((index-16)*4)(BP), BX; \ + ADDL BX, AX; \ + MOVL AX, ((index)*4)(BP) + +// Calculate T1 in AX - uses AX, BX, CX and DX registers. +// Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA256T1(const, e, f, g, h) \ + MOVL (h*4)(DI), BX; \ + ADDL AX, BX; \ + MOVL (e*4)(DI), AX; \ + ADDL $const, BX; \ + MOVL (e*4)(DI), CX; \ + RORL $6, AX; \ + MOVL (e*4)(DI), DX; \ + RORL $11, CX; \ + XORL CX, AX; \ + MOVL (e*4)(DI), CX; \ + RORL $25, DX; \ + ANDL (f*4)(DI), CX; \ + XORL AX, DX; \ + MOVL (e*4)(DI), AX; \ + NOTL AX; \ + ADDL DX, BX; \ + ANDL (g*4)(DI), AX; \ + XORL CX, AX; \ + ADDL BX, AX + +// Calculate T2 in BX - uses AX, BX, CX and DX registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA256T2(a, b, c) \ + MOVL (a*4)(DI), AX; \ + MOVL (c*4)(DI), BX; \ + RORL $2, AX; \ + MOVL (a*4)(DI), DX; \ + ANDL (b*4)(DI), BX; \ + RORL $13, DX; \ + MOVL (a*4)(DI), CX; \ + ANDL (c*4)(DI), CX; \ + XORL DX, AX; \ + XORL CX, BX; \ + MOVL (a*4)(DI), DX; \ + MOVL (b*4)(DI), CX; \ + RORL $22, DX; \ + ANDL (a*4)(DI), CX; \ + XORL CX, BX; \ + XORL DX, AX; \ + ADDL AX, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. 
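+//
+// Rather than shuffling eight values between locations on every round,
+// successive invocations below permute the (a, b, c, d, e, f, g, h)
+// arguments: round 0 uses slots (0, 1, 2, 3, 4, 5, 6, 7), round 1 uses
+// (7, 0, 1, 2, 3, 4, 5, 6), and so on, so the rotation itself costs no
+// instructions at run time.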
+#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA256T1(const, e, f, g, h); \ + MOVL AX, 292(SP); \ + SHA256T2(a, b, c); \ + MOVL 292(SP), AX; \ + ADDL AX, BX; \ + ADDL AX, (d*4)(DI); \ + MOVL BX, (h*4)(DI) + +#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$296-12 + MOVL p_base+4(FP), SI + MOVL p_len+8(FP), DX + SHRL $6, DX + SHLL $6, DX + + LEAL (SI)(DX*1), DI + MOVL DI, 288(SP) + CMPL SI, DI + JEQ end + + LEAL 256(SP), DI // variables + + MOVL dig+0(FP), BP + MOVL (0*4)(BP), AX // a = H0 + MOVL AX, (0*4)(DI) + MOVL (1*4)(BP), BX // b = H1 + MOVL BX, (1*4)(DI) + MOVL (2*4)(BP), CX // c = H2 + MOVL CX, (2*4)(DI) + MOVL (3*4)(BP), DX // d = H3 + MOVL DX, (3*4)(DI) + MOVL (4*4)(BP), AX // e = H4 + MOVL AX, (4*4)(DI) + MOVL (5*4)(BP), BX // f = H5 + MOVL BX, (5*4)(DI) + MOVL (6*4)(BP), CX // g = H6 + MOVL CX, (6*4)(DI) + MOVL (7*4)(BP), DX // h = H7 + MOVL DX, (7*4)(DI) + +loop: + MOVL SP, BP // message schedule + + SHA256ROUND0(0, 0x428a2f98, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND0(1, 0x71374491, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND0(2, 0xb5c0fbcf, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND0(3, 0xe9b5dba5, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND0(4, 0x3956c25b, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND0(5, 0x59f111f1, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND0(6, 0x923f82a4, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND0(7, 0xab1c5ed5, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND0(8, 0xd807aa98, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND0(9, 0x12835b01, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND0(10, 0x243185be, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND0(11, 0x550c7dc3, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND0(12, 0x72be5d74, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND0(13, 0x80deb1fe, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND0(14, 0x9bdc06a7, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND0(15, 0xc19bf174, 1, 2, 3, 4, 5, 6, 7, 0) + + SHA256ROUND1(16, 0xe49b69c1, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(17, 0xefbe4786, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(18, 0x0fc19dc6, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(19, 0x240ca1cc, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(20, 0x2de92c6f, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(21, 0x4a7484aa, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(22, 0x5cb0a9dc, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(23, 0x76f988da, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(24, 0x983e5152, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(25, 0xa831c66d, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(26, 0xb00327c8, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(27, 0xbf597fc7, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(28, 0xc6e00bf3, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(29, 0xd5a79147, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(30, 0x06ca6351, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(31, 0x14292967, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(32, 0x27b70a85, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(33, 0x2e1b2138, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(34, 0x4d2c6dfc, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(35, 0x53380d13, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(36, 0x650a7354, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(37, 0x766a0abb, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(38, 0x81c2c92e, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(39, 0x92722c85, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(40, 0xa2bfe8a1, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(41, 0xa81a664b, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(42, 0xc24b8b70, 6, 7, 0, 1, 2, 3, 4, 5) + 
SHA256ROUND1(43, 0xc76c51a3, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(44, 0xd192e819, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(45, 0xd6990624, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(46, 0xf40e3585, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(47, 0x106aa070, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(48, 0x19a4c116, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(49, 0x1e376c08, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(50, 0x2748774c, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(51, 0x34b0bcb5, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(52, 0x391c0cb3, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(53, 0x4ed8aa4a, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(54, 0x5b9cca4f, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(55, 0x682e6ff3, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(56, 0x748f82ee, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(57, 0x78a5636f, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(58, 0x84c87814, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(59, 0x8cc70208, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(60, 0x90befffa, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(61, 0xa4506ceb, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(62, 0xbef9a3f7, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(63, 0xc67178f2, 1, 2, 3, 4, 5, 6, 7, 0) + + MOVL dig+0(FP), BP + MOVL (0*4)(BP), AX // H0 = a + H0 + ADDL (0*4)(DI), AX + MOVL AX, (0*4)(DI) + MOVL AX, (0*4)(BP) + MOVL (1*4)(BP), BX // H1 = b + H1 + ADDL (1*4)(DI), BX + MOVL BX, (1*4)(DI) + MOVL BX, (1*4)(BP) + MOVL (2*4)(BP), CX // H2 = c + H2 + ADDL (2*4)(DI), CX + MOVL CX, (2*4)(DI) + MOVL CX, (2*4)(BP) + MOVL (3*4)(BP), DX // H3 = d + H3 + ADDL (3*4)(DI), DX + MOVL DX, (3*4)(DI) + MOVL DX, (3*4)(BP) + MOVL (4*4)(BP), AX // H4 = e + H4 + ADDL (4*4)(DI), AX + MOVL AX, (4*4)(DI) + MOVL AX, (4*4)(BP) + MOVL (5*4)(BP), BX // H5 = f + H5 + ADDL (5*4)(DI), BX + MOVL BX, (5*4)(DI) + MOVL BX, (5*4)(BP) + MOVL (6*4)(BP), CX // H6 = g + H6 + ADDL (6*4)(DI), CX + MOVL CX, (6*4)(DI) + MOVL CX, (6*4)(BP) + MOVL (7*4)(BP), DX // H7 = h + H7 + ADDL (7*4)(DI), DX + MOVL DX, (7*4)(DI) + MOVL DX, (7*4)(BP) + + ADDL $64, SI + CMPL SI, 288(SP) + JB loop + +end: + RET diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_amd64.s b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_amd64.s new file mode 100644 index 000000000000..666c32440a59 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_amd64.s @@ -0,0 +1,256 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../textflag.h" + +// SHA256 block routine. See sha256block.go for Go equivalent. 
+// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 63 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVL (index*4)(SI), AX; \ + BSWAPL AX; \ + MOVL AX, (index*4)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) +// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) +#define MSGSCHEDULE1(index) \ + MOVL ((index-2)*4)(BP), AX; \ + MOVL AX, CX; \ + RORL $17, AX; \ + MOVL CX, DX; \ + RORL $19, CX; \ + SHRL $10, DX; \ + MOVL ((index-15)*4)(BP), BX; \ + XORL CX, AX; \ + MOVL BX, CX; \ + XORL DX, AX; \ + RORL $7, BX; \ + MOVL CX, DX; \ + SHRL $3, DX; \ + RORL $18, CX; \ + ADDL ((index-7)*4)(BP), AX; \ + XORL CX, BX; \ + XORL DX, BX; \ + ADDL ((index-16)*4)(BP), BX; \ + ADDL BX, AX; \ + MOVL AX, ((index)*4)(BP) + +// Calculate T1 in AX - uses AX, CX and DX registers. +// h is also used as an accumulator. Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA256T1(const, e, f, g, h) \ + ADDL AX, h; \ + MOVL e, AX; \ + ADDL $const, h; \ + MOVL e, CX; \ + RORL $6, AX; \ + MOVL e, DX; \ + RORL $11, CX; \ + XORL CX, AX; \ + MOVL e, CX; \ + RORL $25, DX; \ + ANDL f, CX; \ + XORL AX, DX; \ + MOVL e, AX; \ + NOTL AX; \ + ADDL DX, h; \ + ANDL g, AX; \ + XORL CX, AX; \ + ADDL h, AX + +// Calculate T2 in BX - uses BX, CX, DX and DI registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA256T2(a, b, c) \ + MOVL a, DI; \ + MOVL c, BX; \ + RORL $2, DI; \ + MOVL a, DX; \ + ANDL b, BX; \ + RORL $13, DX; \ + MOVL a, CX; \ + ANDL c, CX; \ + XORL DX, DI; \ + XORL CX, BX; \ + MOVL a, DX; \ + MOVL b, CX; \ + RORL $22, DX; \ + ANDL a, CX; \ + XORL CX, BX; \ + XORL DX, DI; \ + ADDL DI, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. 
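+//
+// Unlike the 386 version, which keeps the working variables in a scratch
+// area addressed through DI, this version holds a..h in registers R8..R15
+// for the whole block, so the rounds read them directly instead of
+// reloading them from memory.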
+#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA256T1(const, e, f, g, h); \ + SHA256T2(a, b, c); \ + MOVL BX, h; \ + ADDL AX, d; \ + ADDL AX, h + +#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$264-32 + MOVQ p_base+8(FP), SI + MOVQ p_len+16(FP), DX + SHRQ $6, DX + SHLQ $6, DX + + LEAQ (SI)(DX*1), DI + MOVQ DI, 256(SP) + CMPQ SI, DI + JEQ end + + MOVQ dig+0(FP), BP + MOVL (0*4)(BP), R8 // a = H0 + MOVL (1*4)(BP), R9 // b = H1 + MOVL (2*4)(BP), R10 // c = H2 + MOVL (3*4)(BP), R11 // d = H3 + MOVL (4*4)(BP), R12 // e = H4 + MOVL (5*4)(BP), R13 // f = H5 + MOVL (6*4)(BP), R14 // g = H6 + MOVL (7*4)(BP), R15 // h = H7 + +loop: + MOVQ SP, BP // message schedule + + SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8) + + SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10) + 
SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8) + + MOVQ dig+0(FP), BP + ADDL (0*4)(BP), R8 // H0 = a + H0 + MOVL R8, (0*4)(BP) + ADDL (1*4)(BP), R9 // H1 = b + H1 + MOVL R9, (1*4)(BP) + ADDL (2*4)(BP), R10 // H2 = c + H2 + MOVL R10, (2*4)(BP) + ADDL (3*4)(BP), R11 // H3 = d + H3 + MOVL R11, (3*4)(BP) + ADDL (4*4)(BP), R12 // H4 = e + H4 + MOVL R12, (4*4)(BP) + ADDL (5*4)(BP), R13 // H5 = f + H5 + MOVL R13, (5*4)(BP) + ADDL (6*4)(BP), R14 // H6 = g + H6 + MOVL R14, (6*4)(BP) + ADDL (7*4)(BP), R15 // H7 = h + H7 + MOVL R15, (7*4)(BP) + + ADDQ $64, SI + CMPQ SI, 256(SP) + JB loop + +end: + RET diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_decl.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_decl.go new file mode 100644 index 000000000000..a50c9787108d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256block_decl.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 + +package sha256 + +//go:noescape + +func block(dig *digest, p []byte) diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256resume_test.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256resume_test.go new file mode 100644 index 000000000000..6d105d3f4768 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha256/sha256resume_test.go @@ -0,0 +1,57 @@ +package sha256 + +import ( + "bytes" + stdlib "crypto" + "crypto/rand" + _ "crypto/sha256" // To register the stdlib sha224 and sha256 algs. 
+ resumable "github.com/jlhawn/go-crypto" + "io" + "testing" +) + +func compareResumableHash(t *testing.T, r resumable.Hash, h stdlib.Hash) { + // Read 3 Kilobytes of random data into a buffer. + buf := make([]byte, 3*1024) + if _, err := io.ReadFull(rand.Reader, buf); err != nil { + t.Fatalf("unable to load random data: %s", err) + } + + // Use two Hash objects to consume prefixes of the data. One will be + // snapshotted and resumed with each additional byte, then both will write + // that byte. The digests should be equal after each byte is digested. + resumableHasher := r.New() + stdlibHasher := h.New() + + // First, assert that the initial distest is the same. + if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) { + t.Fatalf("initial digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) + } + + multiWriter := io.MultiWriter(resumableHasher, stdlibHasher) + + for i := 1; i <= len(buf); i++ { + + // Write the next byte. + multiWriter.Write(buf[i-1 : i]) + + if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) { + t.Fatalf("digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) + } + + // Snapshot, reset, and restore the chunk hasher. + hashState, err := resumableHasher.State() + if err != nil { + t.Fatalf("unable to get state of hash function: %s", err) + } + resumableHasher.Reset() + if err := resumableHasher.Restore(hashState); err != nil { + t.Fatalf("unable to restorte state of hash function: %s", err) + } + } +} + +func TestResumable(t *testing.T) { + compareResumableHash(t, resumable.SHA224, stdlib.SHA224) + compareResumableHash(t, resumable.SHA256, stdlib.SHA256) +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/resume.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/resume.go new file mode 100644 index 000000000000..091ed9f12153 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/resume.go @@ -0,0 +1,50 @@ +package sha512 + +import ( + "bytes" + "encoding/gob" +) + +// Len returns the number of bytes which have been written to the digest. +func (d *digest) Len() uint64 { + return d.len +} + +// State returns a snapshot of the state of the digest. +func (d *digest) State() ([]byte, error) { + var buf bytes.Buffer + encoder := gob.NewEncoder(&buf) + + // We encode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + d.h, d.x, d.nx, d.len, d.is384, + } + + for _, val := range vals { + if err := encoder.Encode(val); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +// Restore resets the digest to the given state. +func (d *digest) Restore(state []byte) error { + decoder := gob.NewDecoder(bytes.NewReader(state)) + + // We decode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + &d.h, &d.x, &d.nx, &d.len, &d.is384, + } + + for _, val := range vals { + if err := decoder.Decode(val); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512.go new file mode 100644 index 000000000000..9fbb90b99c4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512.go @@ -0,0 +1,197 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha512 implements the SHA384 and SHA512 hash algorithms as defined +// in FIPS 180-2. +package sha512 + +import ( + "github.com/jlhawn/go-crypto" +) + +func init() { + crypto.RegisterHash(crypto.SHA384, New384) + crypto.RegisterHash(crypto.SHA512, New) +} + +// The size of a SHA512 checksum in bytes. +const Size = 64 + +// The size of a SHA384 checksum in bytes. +const Size384 = 48 + +// The blocksize of SHA512 and SHA384 in bytes. +const BlockSize = 128 + +const ( + chunk = 128 + init0 = 0x6a09e667f3bcc908 + init1 = 0xbb67ae8584caa73b + init2 = 0x3c6ef372fe94f82b + init3 = 0xa54ff53a5f1d36f1 + init4 = 0x510e527fade682d1 + init5 = 0x9b05688c2b3e6c1f + init6 = 0x1f83d9abfb41bd6b + init7 = 0x5be0cd19137e2179 + init0_384 = 0xcbbb9d5dc1059ed8 + init1_384 = 0x629a292a367cd507 + init2_384 = 0x9159015a3070dd17 + init3_384 = 0x152fecd8f70e5939 + init4_384 = 0x67332667ffc00b31 + init5_384 = 0x8eb44a8768581511 + init6_384 = 0xdb0c2e0d64f98fa7 + init7_384 = 0x47b5481dbefa4fa4 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + h [8]uint64 + x [chunk]byte + nx int + len uint64 + is384 bool // mark if this digest is SHA-384 +} + +func (d *digest) Reset() { + if !d.is384 { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + } else { + d.h[0] = init0_384 + d.h[1] = init1_384 + d.h[2] = init2_384 + d.h[3] = init3_384 + d.h[4] = init4_384 + d.h[5] = init5_384 + d.h[6] = init6_384 + d.h[7] = init7_384 + } + d.nx = 0 + d.len = 0 +} + +// New returns a new crypto.ResumableHash computing the SHA512 checksum. +func New() crypto.ResumableHash { + d := new(digest) + d.Reset() + return d +} + +// New384 returns a new crypto.ResumableHash computing the SHA384 checksum. +func New384() crypto.ResumableHash { + d := new(digest) + d.is384 = true + d.Reset() + return d +} + +func (d *digest) Size() int { + if !d.is384 { + return Size + } + return Size384 +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := new(digest) + *d = *d0 + hash := d.checkSum() + if d.is384 { + return append(in, hash[:Size384]...) + } + return append(in, hash[:]...) +} + +func (d *digest) checkSum() [Size]byte { + // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. + len := d.len + var tmp [128]byte + tmp[0] = 0x80 + if len%128 < 112 { + d.Write(tmp[0 : 112-len%128]) + } else { + d.Write(tmp[0 : 128+112-len%128]) + } + + // Length in bits. 
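+	// The length is appended as a 128-bit big-endian integer: the shift
+	// below converts the byte count to bits, and because d.len is only a
+	// uint64 (shifting it by 64 or more yields 0 in Go), tmp[0:8] remain
+	// zero for any message shorter than 2^61 bytes.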
+ len <<= 3 + for i := uint(0); i < 16; i++ { + tmp[i] = byte(len >> (120 - 8*i)) + } + d.Write(tmp[0:16]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + h := d.h[:] + if d.is384 { + h = d.h[:6] + } + + var digest [Size]byte + for i, s := range h { + digest[i*8] = byte(s >> 56) + digest[i*8+1] = byte(s >> 48) + digest[i*8+2] = byte(s >> 40) + digest[i*8+3] = byte(s >> 32) + digest[i*8+4] = byte(s >> 24) + digest[i*8+5] = byte(s >> 16) + digest[i*8+6] = byte(s >> 8) + digest[i*8+7] = byte(s) + } + + return digest +} + +// Sum512 returns the SHA512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + +// Sum384 returns the SHA384 checksum of the data. +func Sum384(data []byte) (sum384 [Size384]byte) { + var d digest + d.is384 = true + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum384[:], sum[:Size384]) + return +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512_test.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512_test.go new file mode 100644 index 000000000000..541860f701b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512_test.go @@ -0,0 +1,176 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// SHA512 hash algorithm. See FIPS 180-2. + +package sha512 + +import ( + "fmt" + "io" + "testing" +) + +type sha512Test struct { + out string + in string +} + +var golden = []sha512Test{ + {"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", ""}, + {"1f40fc92da241694750979ee6cf582f2d5d7d28e18335de05abc54d0560e0f5302860c652bf08d560252aa5e74210546f369fbbbce8c12cfc7957b2652fe9a75", "a"}, + {"2d408a0717ec188158278a796c689044361dc6fdde28d6f04973b80896e1823975cdbf12eb63f9e0591328ee235d80e9b5bf1aa6a44f4617ff3caf6400eb172d", "ab"}, + {"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f", "abc"}, + {"d8022f2060ad6efd297ab73dcc5355c9b214054b0d1776a136a669d26a7d3b14f73aa0d0ebff19ee333368f0164b6419a96da49e3e481753e7e96b716bdccb6f", "abcd"}, + {"878ae65a92e86cac011a570d4c30a7eaec442b85ce8eca0c2952b5e3cc0628c2e79d889ad4d5c7c626986d452dd86374b6ffaa7cd8b67665bef2289a5c70b0a1", "abcde"}, + {"e32ef19623e8ed9d267f657a81944b3d07adbb768518068e88435745564e8d4150a0a703be2a7d88b61e3d390c2bb97e2d4c311fdc69d6b1267f05f59aa920e7", "abcdef"}, + {"d716a4188569b68ab1b6dfac178e570114cdf0ea3a1cc0e31486c3e41241bc6a76424e8c37ab26f096fc85ef9886c8cb634187f4fddff645fb099f1ff54c6b8c", "abcdefg"}, + {"a3a8c81bc97c2560010d7389bc88aac974a104e0e2381220c6e084c4dccd1d2d17d4f86db31c2a851dc80e6681d74733c55dcd03dd96f6062cdda12a291ae6ce", "abcdefgh"}, + {"f22d51d25292ca1d0f68f69aedc7897019308cc9db46efb75a03dd494fc7f126c010e8ade6a00a0c1a5f1b75d81e0ed5a93ce98dc9b833db7839247b1d9c24fe", "abcdefghi"}, + {"ef6b97321f34b1fea2169a7db9e1960b471aa13302a988087357c520be957ca119c3ba68e6b4982c019ec89de3865ccf6a3cda1fe11e59f98d99f1502c8b9745", "abcdefghij"}, + {"2210d99af9c8bdecda1b4beff822136753d8342505ddce37f1314e2cdbb488c6016bdaa9bd2ffa513dd5de2e4b50f031393d8ab61f773b0e0130d7381e0f8a1d", "Discard medicine more than two years old."}, + {"a687a8985b4d8d0a24f115fe272255c6afaf3909225838546159c1ed685c211a203796ae8ecc4c81a5b6315919b3a64f10713da07e341fcdbb08541bf03066ce", "He who has a shady past knows that nice guys finish 
last."}, + {"8ddb0392e818b7d585ab22769a50df660d9f6d559cca3afc5691b8ca91b8451374e42bcdabd64589ed7c91d85f626596228a5c8572677eb98bc6b624befb7af8", "I wouldn't marry him with a ten foot pole."}, + {"26ed8f6ca7f8d44b6a8a54ae39640fa8ad5c673f70ee9ce074ba4ef0d483eea00bab2f61d8695d6b34df9c6c48ae36246362200ed820448bdc03a720366a87c6", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"e5a14bf044be69615aade89afcf1ab0389d5fc302a884d403579d1386a2400c089b0dbb387ed0f463f9ee342f8244d5a38cfbc0e819da9529fbff78368c9a982", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"420a1faa48919e14651bed45725abe0f7a58e0f099424c4e5a49194946e38b46c1f8034b18ef169b2e31050d1648e0b982386595f7df47da4b6fd18e55333015", "Nepal premier won't resign."}, + {"d926a863beadb20134db07683535c72007b0e695045876254f341ddcccde132a908c5af57baa6a6a9c63e6649bba0c213dc05fadcf9abccea09f23dcfb637fbe", "For every action there is an equal and opposite government program."}, + {"9a98dd9bb67d0da7bf83da5313dff4fd60a4bac0094f1b05633690ffa7f6d61de9a1d4f8617937d560833a9aaa9ccafe3fd24db418d0e728833545cadd3ad92d", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"d7fde2d2351efade52f4211d3746a0780a26eec3df9b2ed575368a8a1c09ec452402293a8ea4eceb5a4f60064ea29b13cdd86918cd7a4faf366160b009804107", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"b0f35ffa2697359c33a56f5c0cf715c7aeed96da9905ca2698acadb08fbc9e669bf566b6bd5d61a3e86dc22999bcc9f2224e33d1d4f32a228cf9d0349e2db518", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"3d2e5f91778c9e66f7e061293aaa8a8fc742dd3b2e4f483772464b1144189b49273e610e5cccd7a81a19ca1fa70f16b10f1a100a4d8c1372336be8484c64b311", "size: a.out: bad magic"}, + {"b2f68ff58ac015efb1c94c908b0d8c2bf06f491e4de8e6302c49016f7f8a33eac3e959856c7fddbc464de618701338a4b46f76dbfaf9a1e5262b5f40639771c7", "The major problem is with sendmail. -Mark Horton"}, + {"d8c92db5fdf52cf8215e4df3b4909d29203ff4d00e9ad0b64a6a4e04dec5e74f62e7c35c7fb881bd5de95442123df8f57a489b0ae616bd326f84d10021121c57", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"19a9f8dc0a233e464e8566ad3ca9b91e459a7b8c4780985b015776e1bf239a19bc233d0556343e2b0a9bc220900b4ebf4f8bdf89ff8efeaf79602d6849e6f72e", "If the enemy is within range, then so are you."}, + {"00b4c41f307bde87301cdc5b5ab1ae9a592e8ecbb2021dd7bc4b34e2ace60741cc362560bec566ba35178595a91932b8d5357e2c9cec92d393b0fa7831852476", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"91eccc3d5375fd026e4d6787874b1dce201cecd8a27dbded5065728cb2d09c58a3d467bb1faf353bf7ba567e005245d5321b55bc344f7c07b91cb6f26c959be7", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"fabbbe22180f1f137cfdc9556d2570e775d1ae02a597ded43a72a40f9b485d500043b7be128fb9fcd982b83159a0d99aa855a9e7cc4240c00dc01a9bdf8218d7", "C is as portable as Stonehedge!!"}, + {"2ecdec235c1fa4fc2a154d8fba1dddb8a72a1ad73838b51d792331d143f8b96a9f6fcb0f34d7caa351fe6d88771c4f105040e0392f06e0621689d33b2f3ba92e", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {"7ad681f6f96f82f7abfa7ecc0334e8fa16d3dc1cdc45b60b7af43fe4075d2357c0c1d60e98350f1afb1f2fe7a4d7cd2ad55b88e458e06b73c40b437331f5dab4", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. 
Lewis-Randall Rule"}, + {"833f9248ab4a3b9e5131f745fda1ffd2dd435b30e965957e78291c7ab73605fd1912b0794e5c233ab0a12d205a39778d19b83515d6a47003f19cdee51d98c7e0", "How can you write a big system without C++? -Paul Glick"}, +} + +var golden384 = []sha512Test{ + {"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", ""}, + {"54a59b9f22b0b80880d8427e548b7c23abd873486e1f035dce9cd697e85175033caa88e6d57bc35efae0b5afd3145f31", "a"}, + {"c7be03ba5bcaa384727076db0018e99248e1a6e8bd1b9ef58a9ec9dd4eeebb3f48b836201221175befa74ddc3d35afdd", "ab"}, + {"cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7", "abc"}, + {"1165b3406ff0b52a3d24721f785462ca2276c9f454a116c2b2ba20171a7905ea5a026682eb659c4d5f115c363aa3c79b", "abcd"}, + {"4c525cbeac729eaf4b4665815bc5db0c84fe6300068a727cf74e2813521565abc0ec57a37ee4d8be89d097c0d2ad52f0", "abcde"}, + {"c6a4c65b227e7387b9c3e839d44869c4cfca3ef583dea64117859b808c1e3d8ae689e1e314eeef52a6ffe22681aa11f5", "abcdef"}, + {"9f11fc131123f844c1226f429b6a0a6af0525d9f40f056c7fc16cdf1b06bda08e302554417a59fa7dcf6247421959d22", "abcdefg"}, + {"9000cd7cada59d1d2eb82912f7f24e5e69cc5517f68283b005fa27c285b61e05edf1ad1a8a9bded6fd29eb87d75ad806", "abcdefgh"}, + {"ef54915b60cf062b8dd0c29ae3cad69abe6310de63ac081f46ef019c5c90897caefd79b796cfa81139788a260ded52df", "abcdefghi"}, + {"a12070030a02d86b0ddacd0d3a5b598344513d0a051e7355053e556a0055489c1555399b03342845c4adde2dc44ff66c", "abcdefghij"}, + {"86f58ec2d74d1b7f8eb0c2ff0967316699639e8d4eb129de54bdf34c96cdbabe200d052149f2dd787f43571ba74670d4", "Discard medicine more than two years old."}, + {"ae4a2b639ca9bfa04b1855d5a05fe7f230994f790891c6979103e2605f660c4c1262a48142dcbeb57a1914ba5f7c3fa7", "He who has a shady past knows that nice guys finish last."}, + {"40ae213df6436eca952aa6841886fcdb82908ef1576a99c8f49bb9dd5023169f7c53035abdda0b54c302f4974e2105e7", "I wouldn't marry him with a ten foot pole."}, + {"e7cf8b873c9bc950f06259aa54309f349cefa72c00d597aebf903e6519a50011dfe355afff064a10701c705693848df9", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"c3d4f0f4047181c7d39d34703365f7bf70207183caf2c2f6145f04da895ef69124d9cdeb635da636c3a474e61024e29b", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"a097aab567e167d5cf93676ed73252a69f9687cb3179bb2d27c9878119e94bf7b7c4b58dc90582edfaf66e11388ed714", "Nepal premier won't resign."}, + {"5026ca45c41fc64712eb65065da92f6467541c78f8966d3fe2c8e3fb769a3ec14215f819654b47bd64f7f0eac17184f3", "For every action there is an equal and opposite government program."}, + {"ac1cc0f5ac8d5f5514a7b738ac322b7fb52a161b449c3672e9b6a6ad1a5e4b26b001cf3bad24c56598676ca17d4b445a", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"722d10c5de371ec0c8c4b5247ac8a5f1d240d68c73f8da13d8b25f0166d6f309bf9561979a111a0049405771d201941a", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"dc2d3ea18bfa10549c63bf2b75b39b5167a80c12aff0e05443168ea87ff149fb0eda5e0bd234eb5d48c7d02ffc5807f1", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"1d67c969e2a945ae5346d2139760261504d4ba164c522443afe19ef3e29b152a4c52445489cfc9d7215e5a450e8e1e4e", "size: a.out: bad magic"}, + {"5ff8e075e465646e7b73ef36d812c6e9f7d60fa6ea0e533e5569b4f73cde53cdd2cc787f33540af57cca3fe467d32fe0", "The major problem is with sendmail. 
-Mark Horton"}, + {"5bd0a997a67c9ae1979a894eb0cde403dde003c9b6f2c03cf21925c42ff4e1176e6df1ca005381612ef18457b9b7ec3b", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"1eee6da33e7e54fc5be52ae23b94b16ba4d2a947ae4505c6a3edfc7401151ea5205ac01b669b56f27d8ef7f175ed7762", "If the enemy is within range, then so are you."}, + {"76b06e9dea66bfbb1a96029426dc0dfd7830bd297eb447ff5358d94a87cd00c88b59df2493fef56ecbb5231073892ea9", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"12acaf21452cff586143e3f5db0bfdf7802c057e1adf2a619031c4e1b0ccc4208cf6cef8fe722bbaa2fb46a30d9135d8", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"0fc23d7f4183efd186f0bc4fc5db867e026e2146b06cb3d52f4bdbd57d1740122caa853b41868b197b2ac759db39df88", "C is as portable as Stonehedge!!"}, + {"bc805578a7f85d34a86a32976e1c34fe65cf815186fbef76f46ef99cda10723f971f3f1464d488243f5e29db7488598d", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {"b23918399a12ebf4431559eec3813eaf7412e875fd7464f16d581e473330842d2e96c6be49a7ce3f9bb0b8bc0fcbe0fe", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"1764b700eb1ead52a2fc33cc28975c2180f1b8faa5038d94cffa8d78154aab16e91dd787e7b0303948ebed62561542c8", "How can you write a big system without C++? -Paul Glick"}, +} + +func TestGolden(t *testing.T) { + for i := 0; i < len(golden); i++ { + g := golden[i] + s := fmt.Sprintf("%x", Sum512([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum512 function: sha512(%s) = %s want %s", g.in, s, g.out) + } + c := New() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha512[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } + for i := 0; i < len(golden384); i++ { + g := golden384[i] + s := fmt.Sprintf("%x", Sum384([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum384 function: sha384(%s) = %s want %s", g.in, s, g.out) + } + c := New384() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha384[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } +} + +func TestSize(t *testing.T) { + c := New() + if got := c.Size(); got != Size { + t.Errorf("Size = %d; want %d", got, Size) + } + c = New384() + if got := c.Size(); got != Size384 { + t.Errorf("New384.Size = %d; want %d", got, Size384) + } +} + +func TestBlockSize(t *testing.T) { + c := New() + if got := c.BlockSize(); got != BlockSize { + t.Errorf("BlockSize = %d; want %d", got, BlockSize) + } +} + +var bench = New() +var buf = make([]byte, 8192) + +func benchmarkSize(b *testing.B, size int) { + b.SetBytes(int64(size)) + sum := make([]byte, bench.Size()) + for i := 0; i < b.N; i++ { + bench.Reset() + bench.Write(buf[:size]) + bench.Sum(sum[:0]) + } +} + +func BenchmarkHash8Bytes(b *testing.B) { + benchmarkSize(b, 8) +} + +func BenchmarkHash1K(b *testing.B) { + benchmarkSize(b, 1024) +} + +func BenchmarkHash8K(b *testing.B) { + benchmarkSize(b, 8192) +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block.go 
b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block.go new file mode 100644 index 000000000000..648ae8f7e1f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block.go @@ -0,0 +1,144 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 + +// SHA512 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package sha512 + +var _K = []uint64{ + 0x428a2f98d728ae22, + 0x7137449123ef65cd, + 0xb5c0fbcfec4d3b2f, + 0xe9b5dba58189dbbc, + 0x3956c25bf348b538, + 0x59f111f1b605d019, + 0x923f82a4af194f9b, + 0xab1c5ed5da6d8118, + 0xd807aa98a3030242, + 0x12835b0145706fbe, + 0x243185be4ee4b28c, + 0x550c7dc3d5ffb4e2, + 0x72be5d74f27b896f, + 0x80deb1fe3b1696b1, + 0x9bdc06a725c71235, + 0xc19bf174cf692694, + 0xe49b69c19ef14ad2, + 0xefbe4786384f25e3, + 0x0fc19dc68b8cd5b5, + 0x240ca1cc77ac9c65, + 0x2de92c6f592b0275, + 0x4a7484aa6ea6e483, + 0x5cb0a9dcbd41fbd4, + 0x76f988da831153b5, + 0x983e5152ee66dfab, + 0xa831c66d2db43210, + 0xb00327c898fb213f, + 0xbf597fc7beef0ee4, + 0xc6e00bf33da88fc2, + 0xd5a79147930aa725, + 0x06ca6351e003826f, + 0x142929670a0e6e70, + 0x27b70a8546d22ffc, + 0x2e1b21385c26c926, + 0x4d2c6dfc5ac42aed, + 0x53380d139d95b3df, + 0x650a73548baf63de, + 0x766a0abb3c77b2a8, + 0x81c2c92e47edaee6, + 0x92722c851482353b, + 0xa2bfe8a14cf10364, + 0xa81a664bbc423001, + 0xc24b8b70d0f89791, + 0xc76c51a30654be30, + 0xd192e819d6ef5218, + 0xd69906245565a910, + 0xf40e35855771202a, + 0x106aa07032bbd1b8, + 0x19a4c116b8d2d0c8, + 0x1e376c085141ab53, + 0x2748774cdf8eeb99, + 0x34b0bcb5e19b48a8, + 0x391c0cb3c5c95a63, + 0x4ed8aa4ae3418acb, + 0x5b9cca4f7763e373, + 0x682e6ff3d6b2b8a3, + 0x748f82ee5defb2fc, + 0x78a5636f43172f60, + 0x84c87814a1f0ab72, + 0x8cc702081a6439ec, + 0x90befffa23631e28, + 0xa4506cebde82bde9, + 0xbef9a3f7b2c67915, + 0xc67178f2e372532b, + 0xca273eceea26619c, + 0xd186b8c721c0c207, + 0xeada7dd6cde0eb1e, + 0xf57d4f7fee6ed178, + 0x06f067aa72176fba, + 0x0a637dc5a2c898a6, + 0x113f9804bef90dae, + 0x1b710b35131c471b, + 0x28db77f523047d84, + 0x32caab7b40c72493, + 0x3c9ebe0a15c9bebc, + 0x431d67c49c100d4c, + 0x4cc5d4becb3e42b6, + 0x597f299cfc657e2a, + 0x5fcb6fab3ad6faec, + 0x6c44198c4a475817, +} + +func block(dig *digest, p []byte) { + var w [80]uint64 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + for i := 0; i < 16; i++ { + j := i * 8 + w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 | + uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7]) + } + for i := 16; i < 80; i++ { + v1 := w[i-2] + t1 := (v1>>19 | v1<<(64-19)) ^ (v1>>61 | v1<<(64-61)) ^ (v1 >> 6) + v2 := w[i-15] + t2 := (v2>>1 | v2<<(64-1)) ^ (v2>>8 | v2<<(64-8)) ^ (v2 >> 7) + + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 80; i++ { + t1 := h + ((e>>14 | e<<(64-14)) ^ (e>>18 | e<<(64-18)) ^ (e>>41 | e<<(64-41))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>28 | a<<(64-28)) ^ (a>>34 | a<<(64-34)) ^ (a>>39 | a<<(64-39))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], 
dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block_amd64.s b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block_amd64.s new file mode 100644 index 000000000000..6f71602398c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block_amd64.s @@ -0,0 +1,273 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../textflag.h" + +// SHA512 block routine. See sha512block.go for Go equivalent. +// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 79 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVQ (index*8)(SI), AX; \ + BSWAPQ AX; \ + MOVQ AX, (index*8)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 +// SIGMA0(x) = ROTR(1,x) XOR ROTR(8,x) XOR SHR(7,x) +// SIGMA1(x) = ROTR(19,x) XOR ROTR(61,x) XOR SHR(6,x) +#define MSGSCHEDULE1(index) \ + MOVQ ((index-2)*8)(BP), AX; \ + MOVQ AX, CX; \ + RORQ $19, AX; \ + MOVQ CX, DX; \ + RORQ $61, CX; \ + SHRQ $6, DX; \ + MOVQ ((index-15)*8)(BP), BX; \ + XORQ CX, AX; \ + MOVQ BX, CX; \ + XORQ DX, AX; \ + RORQ $1, BX; \ + MOVQ CX, DX; \ + SHRQ $7, DX; \ + RORQ $8, CX; \ + ADDQ ((index-7)*8)(BP), AX; \ + XORQ CX, BX; \ + XORQ DX, BX; \ + ADDQ ((index-16)*8)(BP), BX; \ + ADDQ BX, AX; \ + MOVQ AX, ((index)*8)(BP) + +// Calculate T1 in AX - uses AX, CX and DX registers. +// h is also used as an accumulator. Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(14,x) XOR ROTR(18,x) XOR ROTR(41,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA512T1(const, e, f, g, h) \ + MOVQ $const, DX; \ + ADDQ AX, h; \ + MOVQ e, AX; \ + ADDQ DX, h; \ + MOVQ e, CX; \ + RORQ $14, AX; \ + MOVQ e, DX; \ + RORQ $18, CX; \ + XORQ CX, AX; \ + MOVQ e, CX; \ + RORQ $41, DX; \ + ANDQ f, CX; \ + XORQ AX, DX; \ + MOVQ e, AX; \ + NOTQ AX; \ + ADDQ DX, h; \ + ANDQ g, AX; \ + XORQ CX, AX; \ + ADDQ h, AX + +// Calculate T2 in BX - uses BX, CX, DX and DI registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(28,x) XOR ROTR(34,x) XOR ROTR(39,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA512T2(a, b, c) \ + MOVQ a, DI; \ + MOVQ c, BX; \ + RORQ $28, DI; \ + MOVQ a, DX; \ + ANDQ b, BX; \ + RORQ $34, DX; \ + MOVQ a, CX; \ + ANDQ c, CX; \ + XORQ DX, DI; \ + XORQ CX, BX; \ + MOVQ a, DX; \ + MOVQ b, CX; \ + RORQ $39, DX; \ + ANDQ a, CX; \ + XORQ CX, BX; \ + XORQ DX, DI; \ + ADDQ DI, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. 
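+//
+// The $648-byte stack frame of the routine below holds the 80-word
+// (8 bytes per word) message schedule at 0(SP) and the end-of-input
+// pointer at 640(SP).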
+#define SHA512ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA512T1(const, e, f, g, h); \ + SHA512T2(a, b, c); \ + MOVQ BX, h; \ + ADDQ AX, d; \ + ADDQ AX, h + +#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA512ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA512ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$648-32 + MOVQ p_base+8(FP), SI + MOVQ p_len+16(FP), DX + SHRQ $7, DX + SHLQ $7, DX + + LEAQ (SI)(DX*1), DI + MOVQ DI, 640(SP) + CMPQ SI, DI + JEQ end + + MOVQ dig+0(FP), BP + MOVQ (0*8)(BP), R8 // a = H0 + MOVQ (1*8)(BP), R9 // b = H1 + MOVQ (2*8)(BP), R10 // c = H2 + MOVQ (3*8)(BP), R11 // d = H3 + MOVQ (4*8)(BP), R12 // e = H4 + MOVQ (5*8)(BP), R13 // f = H5 + MOVQ (6*8)(BP), R14 // g = H6 + MOVQ (7*8)(BP), R15 // h = H7 + +loop: + MOVQ SP, BP // message schedule + + SHA512ROUND0(0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND0(1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND0(2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND0(3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND0(4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND0(5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND0(6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND0(7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND0(8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND0(9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8) + + SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14) + 
SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8) + + MOVQ 
dig+0(FP), BP
+	ADDQ (0*8)(BP), R8 // H0 = a + H0
+	MOVQ R8, (0*8)(BP)
+	ADDQ (1*8)(BP), R9 // H1 = b + H1
+	MOVQ R9, (1*8)(BP)
+	ADDQ (2*8)(BP), R10 // H2 = c + H2
+	MOVQ R10, (2*8)(BP)
+	ADDQ (3*8)(BP), R11 // H3 = d + H3
+	MOVQ R11, (3*8)(BP)
+	ADDQ (4*8)(BP), R12 // H4 = e + H4
+	MOVQ R12, (4*8)(BP)
+	ADDQ (5*8)(BP), R13 // H5 = f + H5
+	MOVQ R13, (5*8)(BP)
+	ADDQ (6*8)(BP), R14 // H6 = g + H6
+	MOVQ R14, (6*8)(BP)
+	ADDQ (7*8)(BP), R15 // H7 = h + H7
+	MOVQ R15, (7*8)(BP)
+
+	ADDQ $128, SI
+	CMPQ SI, 640(SP)
+	JB loop
+
+end:
+	RET
diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block_decl.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block_decl.go
new file mode 100644
index 000000000000..bef99de2e461
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512block_decl.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+
+package sha512
+
+//go:noescape
+
+func block(dig *digest, p []byte)
diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512resume_test.go b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512resume_test.go
new file mode 100644
index 000000000000..b30f7aab98ed
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/sha512/sha512resume_test.go
@@ -0,0 +1,57 @@
+package sha512
+
+import (
+	"bytes"
+	stdlib "crypto"
+	"crypto/rand"
+	_ "crypto/sha512" // To register the stdlib sha384 and sha512 algs.
+	resumable "github.com/jlhawn/go-crypto"
+	"io"
+	"testing"
+)
+
+func compareResumableHash(t *testing.T, r resumable.Hash, h stdlib.Hash) {
+	// Read 3 kilobytes of random data into a buffer.
+	buf := make([]byte, 3*1024)
+	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+		t.Fatalf("unable to load random data: %s", err)
+	}
+
+	// Use two Hash objects to consume prefixes of the data. One will be
+	// snapshotted and resumed with each additional byte, then both will write
+	// that byte. The digests should be equal after each byte is digested.
+	resumableHasher := r.New()
+	stdlibHasher := h.New()
+
+	// First, assert that the initial digests are the same.
+	if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) {
+		t.Fatalf("initial digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil))
+	}
+
+	multiWriter := io.MultiWriter(resumableHasher, stdlibHasher)
+
+	for i := 1; i <= len(buf); i++ {
+
+		// Write the next byte.
+		multiWriter.Write(buf[i-1 : i])
+
+		if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) {
+			t.Fatalf("digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil))
+		}
+
+		// Snapshot, reset, and restore the chunk hasher.
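+		// State() is expected to serialize the hasher's chaining values
+		// and buffered input; after Reset(), Restore() must reproduce
+		// exactly that state, so both hashers keep yielding identical
+		// digests for the remainder of the stream.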
+		hashState, err := resumableHasher.State()
+		if err != nil {
+			t.Fatalf("unable to get state of hash function: %s", err)
+		}
+		resumableHasher.Reset()
+		if err := resumableHasher.Restore(hashState); err != nil {
+			t.Fatalf("unable to restore state of hash function: %s", err)
+		}
+	}
+}
+
+func TestResumable(t *testing.T) {
+	compareResumableHash(t, resumable.SHA384, stdlib.SHA384)
+	compareResumableHash(t, resumable.SHA512, stdlib.SHA512)
+}
diff --git a/Godeps/_workspace/src/github.com/jlhawn/go-crypto/textflag.h b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/textflag.h
new file mode 100644
index 000000000000..2a76e76c2965
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jlhawn/go-crypto/textflag.h
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines flags attached to various functions
+// and data objects. The compilers, assemblers, and linker must
+// all agree on these values.
+
+// Don't profile the marked routine. This flag is deprecated.
+#define NOPROF 1
+// It is ok for the linker to get multiple of these symbols. It will
+// pick one of the duplicates to use.
+#define DUPOK 2
+// Don't insert stack check preamble.
+#define NOSPLIT 4
+// Put this data in a read-only section.
+#define RODATA 8
+// This data contains no pointers.
+#define NOPTR 16
+// This is a wrapper function and should not count as disabling 'recover'.
+#define WRAPPER 32
+// This function uses its incoming context register.
+#define NEEDCTXT 64
diff --git a/README.md b/README.md
index b8214d6af3e1..13e5b32ab53d 100644
--- a/README.md
+++ b/README.md
@@ -43,6 +43,26 @@ For more information on the security of containers, see these articles:
Running untrusted containers will become less scary as improvements are made upstream to Docker and Kubernetes, but until then please be conscious of the images you run. Consider using images from trusted parties, building them yourself on OpenShift, or only running containers that run as non-root users.
+Docker 1.6
+----------
+OpenShift now requires at least Docker 1.6. Here's how to get it:
+
+### Fedora 21
+RPMs for Docker 1.6 are available for Fedora 21 in the updates yum repository.
+
+### CentOS 7
+Docker 1.6 is not yet available in the CentOS 7 Extras yum repository. In the meantime, you will need to install it from https://mirror.openshift.com/pub/openshift-v3/dependencies/centos7/x86_64/. Create `/etc/yum.repos.d/openshift-v3-dependencies.repo` with these contents:
+
+    [openshift-v3-dependencies]
+    name=OpenShift V3 Dependencies
+    baseurl=https://mirror.openshift.com/pub/openshift-v3/dependencies/centos7/x86_64/
+    enabled=1
+    metadata_expire=7d
+    gpgcheck=0
+
+You will now be able to `yum install` or `yum update` Docker to 1.6.
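OpenShift also enforces this requirement at node startup. As a quick orientation for the yum instructions above, here is a condensed sketch of the Docker API version gate this diff adds to pkg/cmd/server/kubernetes/node.go (error handling trimmed and the daemon endpoint hard-coded for illustration; the import path assumes the fsouza/go-dockerclient library the node code vendors):

    package main

    import (
    	"log"

    	dockerclient "github.com/fsouza/go-dockerclient"
    )

    // Docker 1.6 is the first release whose server API version (1.18)
    // supports pulling images by ID.
    const minimumDockerAPIVersionWithPullByID = "1.18"

    func main() {
    	client, err := dockerclient.NewClient("unix:///var/run/docker.sock")
    	if err != nil {
    		log.Fatal(err)
    	}
    	env, err := client.Version()
    	if err != nil {
    		log.Fatal(err)
    	}
    	serverVersion, err := dockerclient.NewAPIVersion(env.Get("ApiVersion"))
    	if err != nil {
    		log.Fatal(err)
    	}
    	minimum, err := dockerclient.NewAPIVersion(minimumDockerAPIVersionWithPullByID)
    	if err != nil {
    		log.Fatal(err)
    	}
    	if serverVersion.LessThan(minimum) {
    		log.Fatal("Docker 1.6 or later (server API version 1.18 or later) required")
    	}
    }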
+
+
Getting Started
---------------
The simplest way to run OpenShift Origin is in a Docker container:
diff --git a/assets/app/scripts/directives/util.js b/assets/app/scripts/directives/util.js
index 2e5e8ec44fd4..5212e2f74276 100644
--- a/assets/app/scripts/directives/util.js
+++ b/assets/app/scripts/directives/util.js
@@ -84,4 +84,4 @@ angular.module('openshiftConsole')
 },
 templateUrl: 'views/directives/_custom-icon.html'
 }
- });
\ No newline at end of file
+ });
diff --git a/assets/app/scripts/filters/resources.js b/assets/app/scripts/filters/resources.js
index c524061fdb05..25110134e9a0 100644
--- a/assets/app/scripts/filters/resources.js
+++ b/assets/app/scripts/filters/resources.js
@@ -76,12 +76,31 @@ angular.module('openshiftConsole')
 };
 })
 .filter('imageName', function() {
+ // takes an image name and returns everything after the first colon
+ // (e.g. the tag), if one exists.
+ return function(image) {
+ if (!image) {
+ return "";
+ }
+
+ if (!image.contains(":")) {
+ return image;
+ }
+
+ return image.split(":")[1];
+ }
+ })
+ .filter('imageStreamName', function() {
 return function(image) {
 if (!image) {
 return "";
 }
 // TODO move this parsing method into a utility method
- var slashSplit = image.split("/");
+
+ // remove @sha256:....
+ var imageWithoutID = image.split("@")[0];
+
+ var slashSplit = imageWithoutID.split("/");
 var semiColonSplit;
 if (slashSplit.length === 3) {
 semiColonSplit = slashSplit[2].split(":");
@@ -90,10 +109,10 @@ angular.module('openshiftConsole')
 else if (slashSplit.length === 2) {
 // TODO umm tough... this could be registry/imageName or imageRepo/imageName
 // have to check if the first bit matches a registry pattern, will handle this later...
- return image;
+ return imageWithoutID;
 }
 else if (slashSplit.length === 1) {
- semiColonSplit = imageWithoutID.split(":");
+ semiColonSplit = imageWithoutID.split(":");
 return semiColonSplit[0];
 }
 };
diff --git a/assets/app/views/_pod-template.html b/assets/app/views/_pod-template.html
index 061e72cf355a..98b518413059 100644
--- a/assets/app/views/_pod-template.html
+++ b/assets/app/views/_pod-template.html
@@ -13,9 +13,7 @@
 {{container.name}}

-          Image: {{container.image | imageName}}
-          ({{imagesByDockerReference[container.image].metadata.name.substr(0, 10)}})
-
+          Image: {{container.image | imageStreamName}}
@@ -77,4 +75,4 @@

 {{container.name}}

-
\ No newline at end of file
+
diff --git a/assets/app/views/images.html b/assets/app/views/images.html
index 55377bf6e758..8b20eb428396 100644
--- a/assets/app/views/images.html
+++ b/assets/app/views/images.html
[hunks @@ -2,12 +2,12 @@, @@ -15,9 +15,9 @@, @@ -32,7 +32,7 @@ and @@ -40,7 +40,7 @@: the surrounding template markup was stripped during extraction. The recoverable fragments are the "Image Streams" heading, {{emptyMessage}}, the "No images for this tag" / "created" tag rows, and the {{image.dockerImageReference}} entry.]
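Since the new imageStreamName filter's parsing is easy to get wrong, here is the same logic as a standalone Go sketch (a hand translation of the AngularJS filter above, for illustration only; the console itself remains JavaScript):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // imageStreamName mirrors the AngularJS filter: drop any "@sha256:..."
    // suffix, then reduce "registry/namespace/name:tag" style references
    // to "namespace/name" (or just "name").
    func imageStreamName(image string) string {
    	if image == "" {
    		return ""
    	}
    	withoutID := strings.SplitN(image, "@", 2)[0]
    	parts := strings.Split(withoutID, "/")
    	switch len(parts) {
    	case 3:
    		name := strings.SplitN(parts[2], ":", 2)[0]
    		return parts[1] + "/" + name
    	case 2:
    		// could be registry/name or namespace/name; returned as-is,
    		// matching the filter's TODO
    		return withoutID
    	case 1:
    		return strings.SplitN(parts[0], ":", 2)[0]
    	}
    	return withoutID
    }

    func main() {
    	fmt.Println(imageStreamName("registry:5000/ns/stream@sha256:abc123"))
    	// ns/stream
    }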
diff --git a/assets/app/views/project.html b/assets/app/views/project.html index b6780bf9fba2..fe582fda0341 100644 --- a/assets/app/views/project.html +++ b/assets/app/views/project.html @@ -65,7 +65,7 @@
 {{serviceId}}, triggered by
-            new image for {{cause.imageTrigger.repositoryName | imageName}}:{{cause.imageTrigger.tag}}
+            new image for {{cause.imageTrigger.repositoryName | imageStreamName}}:{{cause.imageTrigger.tag}}
             deployment configuration change
@@ -134,7 +134,7 @@
, triggered by - new image for {{cause.imageTrigger.repositoryName | imageName}}:{{cause.imageTrigger.tag}} + new image for {{cause.imageTrigger.repositoryName | imageStreamName}}:{{cause.imageTrigger.tag}} deployment configuration change diff --git a/hack/build-images.sh b/hack/build-images.sh index 22049f2fe0a6..78e9387945b7 100755 --- a/hack/build-images.sh +++ b/hack/build-images.sh @@ -54,10 +54,8 @@ image openshift/origin-pod images/pod # images that depend on openshift/origin-base image openshift/origin images/origin image openshift/origin-haproxy-router images/router/haproxy -# For now, don't build the v2 registry image -# To be reenabled when we actually switch to the v2 registry -#image openshift/origin-docker-registry images/dockerregistry image openshift/origin-keepalived-ipfailover images/ipfailover/keepalived +image openshift/origin-docker-registry images/dockerregistry # images that depend on openshift/origin image openshift/origin-deployer images/deployer image openshift/origin-docker-builder images/builder/docker/docker-builder diff --git a/hack/push-release.sh b/hack/push-release.sh index da3cafd38ff1..05a466dbd65b 100755 --- a/hack/push-release.sh +++ b/hack/push-release.sh @@ -43,6 +43,8 @@ images=( openshift/origin-pod openshift/origin-deployer openshift/origin-docker-builder + openshift/origin-docker-registry + openshift/origin-keepalived-ipfailover openshift/origin-sti-builder openshift/origin-haproxy-router openshift/hello-openshift diff --git a/hack/test-end-to-end.sh b/hack/test-end-to-end.sh index f908a69661b6..138ea0d3146f 100755 --- a/hack/test-end-to-end.sh +++ b/hack/test-end-to-end.sh @@ -260,6 +260,7 @@ openshift admin policy add-role-to-user view e2e-user --namespace=default openshift admin new-project test --description="This is an example project to demonstrate OpenShift v3" --admin="e2e-user" openshift admin new-project docker --description="This is an example project to demonstrate OpenShift v3" --admin="e2e-user" openshift admin new-project custom --description="This is an example project to demonstrate OpenShift v3" --admin="e2e-user" +openshift admin new-project cache --description="This is an example project to demonstrate OpenShift v3" --admin="e2e-user" echo "The console should be available at ${API_SCHEME}://${PUBLIC_MASTER_HOST}:${API_PORT}/console." echo "Log in as 'e2e-user' to see the 'test' project." @@ -270,13 +271,10 @@ openshift admin router --create --credentials="${MASTER_CONFIG_DIR}/openshift-ro # install the registry. The --mount-host option is provided to reuse local storage. 
echo "[INFO] Installing the registry" -# TODO: add --images="${USE_IMAGES}" when the Docker registry is built alongside OpenShift -openshift admin registry --create --credentials="${MASTER_CONFIG_DIR}/openshift-registry.kubeconfig" --mount-host="/tmp/openshift.local.registry" --images='openshift/origin-${component}:latest' +openshift admin registry --create --credentials="${MASTER_CONFIG_DIR}/openshift-registry.kubeconfig" --mount-host="/tmp/openshift.local.registry" --images="${USE_IMAGES}" echo "[INFO] Pre-pulling and pushing ruby-20-centos7" docker pull openshift/ruby-20-centos7:latest -# TODO: remove after this becomes part of the build -docker pull openshift/origin-docker-registry echo "[INFO] Pulled ruby-20-centos7" echo "[INFO] Waiting for Docker registry pod to start" @@ -287,30 +285,44 @@ wait_for_command '[[ "$(osc get endpoints docker-registry --output-version=v1bet DOCKER_REGISTRY=$(osc get --output-version=v1beta3 --template="{{ .spec.portalIP }}:{{ with index .spec.ports 0 }}{{ .port }}{{ end }}" service docker-registry) echo "[INFO] Verifying the docker-registry is up at ${DOCKER_REGISTRY}" -wait_for_url_timed "http://${DOCKER_REGISTRY}" "[INFO] Docker registry says: " $((2*TIME_MIN)) +wait_for_url_timed "http://${DOCKER_REGISTRY}/healthz" "[INFO] Docker registry says: " $((2*TIME_MIN)) [ "$(dig @${API_HOST} "docker-registry.default.local." A)" ] -docker tag -f openshift/ruby-20-centos7:latest ${DOCKER_REGISTRY}/test/ruby-20-centos7:latest -docker push ${DOCKER_REGISTRY}/test/ruby-20-centos7:latest +# Client setup (log in as e2e-user and set 'test' as the default project) +# This is required to be able to push to the registry! +echo "[INFO] Logging in as a regular user (e2e-user:pass) with project 'test'..." +osc login -u e2e-user -p pass +osc project cache +token=$(osc config view --flatten -o template -t '{{range .users}}{{if eq .name "e2e-user"}}{{.user.token}}{{end}}{{end}}') +[[ -n ${token} ]] + +# TODO reenable this once we've got docker push secrets 100% ready +#docker login -u e2e-user -p ${token} -e e2e-user@openshift.com ${DOCKER_REGISTRY} +# TODO remove the following line once we've got docker push secrets 100% ready +echo '{"apiVersion": "v1beta1", "kind": "ImageStream", "metadata": {"name": "ruby-20-centos7"}}' | osc create -f - + +docker tag -f openshift/ruby-20-centos7:latest ${DOCKER_REGISTRY}/cache/ruby-20-centos7:latest +docker push ${DOCKER_REGISTRY}/cache/ruby-20-centos7:latest echo "[INFO] Pushed ruby-20-centos7" +echo "[INFO] Back to 'master' context with 'admin' user..." +osc project default + # Process template and create echo "[INFO] Submitting application template json for processing..." osc process -n test -f examples/sample-app/application-template-stibuild.json > "${STI_CONFIG_FILE}" osc process -n docker -f examples/sample-app/application-template-dockerbuild.json > "${DOCKER_CONFIG_FILE}" osc process -n custom -f examples/sample-app/application-template-custombuild.json > "${CUSTOM_CONFIG_FILE}" -# Client setup (log in as e2e-user and set 'test' as the default project) -echo "[INFO] Logging in as a regular user (e2e-user:pass) with project 'test'..." 
-osc login -u e2e-user -p pass +echo "[INFO] Back to 'test' context with 'e2e-user' user" osc project test echo "[INFO] Applying STI application config" osc create -f "${STI_CONFIG_FILE}" -# this needs to be done before waiting fo the build because right now only cluster-admins can see build logs, because that uses proxy +# this needs to be done before waiting for the build because right now only cluster-admins can see build logs, because that uses proxy echo "[INFO] Back to 'master' context with 'admin' user..." osc project default @@ -349,7 +361,7 @@ osc exec -p ${registry_pod} whoami | grep root # Port forwarding echo "[INFO] Validating port-forward" osc port-forward -p ${registry_pod} 5001:5000 &> "${LOG_DIR}/port-forward.log" & -wait_for_url_timed "http://localhost:5001/" "[INFO] Docker registry says: " $((10*TIME_SEC)) +wait_for_url_timed "http://localhost:5001/healthz" "[INFO] Docker registry says: " $((10*TIME_SEC)) # UI e2e tests can be found in assets/test/e2e if [[ "$TEST_ASSETS" == "true" ]]; then diff --git a/images/dockerregistry/Dockerfile b/images/dockerregistry/Dockerfile index 42529d3d4d64..c7c88acf23f0 100644 --- a/images/dockerregistry/Dockerfile +++ b/images/dockerregistry/Dockerfile @@ -3,7 +3,7 @@ FROM openshift/origin-base ADD config.yml /config.yml ADD bin/dockerregistry /dockerregistry -ENV REGISTRY_CONFIGURATION_PATH=/config.yml +ENV REGISTRY_CONFIGURATION_PATH=/config.yml DISABLE_USER_AUTH=true EXPOSE 5000 VOLUME /registry diff --git a/images/dockerregistry/config.yml b/images/dockerregistry/config.yml index c9337b942dc4..7d05e11504ab 100644 --- a/images/dockerregistry/config.yml +++ b/images/dockerregistry/config.yml @@ -1,8 +1,11 @@ version: 0.1 -loglevel: debug +log: + level: debug http: addr: :5000 storage: + cache: + layerinfo: inmemory filesystem: rootdirectory: /registry auth: diff --git a/openshift.spec b/openshift.spec index fc43f51d34af..c8ef5c50898e 100644 --- a/openshift.spec +++ b/openshift.spec @@ -55,7 +55,7 @@ Requires(postun): systemd %package node Summary: OpenShift Node Requires: %{name} = %{version}-%{release} -Requires: docker-io >= 1.3.2 +Requires: docker-io >= 1.6.0 Requires: tuned-profiles-openshift-node Requires: util-linux Requires: socat diff --git a/pkg/assets/bindata.go b/pkg/assets/bindata.go index 585ffb83aea7..00b82f686553 100644 --- a/pkg/assets/bindata.go +++ b/pkg/assets/bindata.go @@ -16736,9 +16736,13 @@ return e ? e :"template" === c ? "fa fa-bolt" :"image" === c ? "fa fa-cube" :""; }; } ]).filter("imageName", function() { return function(a) { +return a ? a.contains(":") ? a.split(":")[1] :a :""; +}; +}).filter("imageStreamName", function() { +return function(a) { if (!a) return ""; -var b, c = a.split("/"); -return 3 === c.length ? (b = c[2].split(":"), c[1] + "/" + b[0]) :2 === c.length ? a :1 === c.length ? (b = a.split(":"), b[0]) :void 0; +var b, c = a.split("@")[0], d = c.split("/"); +return 3 === d.length ? (b = d[2].split(":"), d[1] + "/" + b[0]) :2 === d.length ? c :1 === d.length ? (b = c.split(":"), b[0]) :void 0; }; }).filter("imageEnv", function() { return function(a, b) { @@ -62440,9 +62444,7 @@ var _views_pod_template_html = []byte(`
 {{container.name}}

-Image: {{container.image | imageName}}
- ({{imagesByDockerReference[container.image].metadata.name.substr(0, 10)}})
-
+Image: {{container.image | imageStreamName}}
@@ -63649,7 +63651,7 @@ var _views_images_html = []byte(`
[minified template markup stripped during extraction; the recoverable fragments are the "No images for this tag" and "created" rows]
@@ -63666,7 +63668,7 @@ var _views_images_html = []byte(`
- + {{image.dockerImageReference}} created @@ -63851,7 +63853,7 @@ and {{numRemaining}} {{numRemaining == 1 ? "other" : "others"}} , triggered by -new image for {{cause.imageTrigger.repositoryName | imageName}}:{{cause.imageTrigger.tag}} +new image for {{cause.imageTrigger.repositoryName | imageStreamName}}:{{cause.imageTrigger.tag}} deployment configuration change @@ -63910,7 +63912,7 @@ and {{numRemaining}} {{numRemaining == 1 ? "other" : "others"}} , triggered by -new image for {{cause.imageTrigger.repositoryName | imageName}}:{{cause.imageTrigger.tag}} +new image for {{cause.imageTrigger.repositoryName | imageStreamName}}:{{cause.imageTrigger.tag}} deployment configuration change diff --git a/pkg/cmd/dockerregistry/dockerregistry.go b/pkg/cmd/dockerregistry/dockerregistry.go index c0a4fb9782f0..08128a0ce0d6 100644 --- a/pkg/cmd/dockerregistry/dockerregistry.go +++ b/pkg/cmd/dockerregistry/dockerregistry.go @@ -1,23 +1,42 @@ package dockerregistry import ( + "crypto/tls" + "crypto/x509" + "fmt" "io" + "io/ioutil" "net/http" "os" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" "github.com/docker/distribution/registry/handlers" _ "github.com/docker/distribution/registry/storage/driver/filesystem" _ "github.com/docker/distribution/registry/storage/driver/s3" "github.com/docker/distribution/version" gorillahandlers "github.com/gorilla/handlers" - _ "github.com/openshift/origin/pkg/dockerregistry/auth" - _ "github.com/openshift/origin/pkg/dockerregistry/middleware/repository" - "golang.org/x/net/context" + _ "github.com/openshift/origin/pkg/dockerregistry/server" ) +type healthHandler struct { + delegate http.Handler +} + +func newHealthHandler(delegate http.Handler) http.Handler { + return &healthHandler{delegate} +} + +func (h *healthHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.URL.Path == "/healthz" { + health.StatusHandler(w, req) + return + } + h.delegate.ServeHTTP(w, req) +} + // Execute runs the Docker registry. 
func Execute(configFile io.Reader) { config, err := configuration.Parse(configFile) @@ -25,29 +44,61 @@ func Execute(configFile io.Reader) { log.Fatalf("Error parsing configuration file: %s", err) } - logLevel, err := log.ParseLevel(string(config.Loglevel)) + logLevel, err := log.ParseLevel(string(config.Log.Level)) if err != nil { - log.Errorf("Error parsing log level %q: %s", config.Loglevel, err) + log.Errorf("Error parsing log level %q: %s", config.Log.Level, err) logLevel = log.InfoLevel } log.SetLevel(logLevel) + log.Infof("version=%s", version.Version) ctx := context.Background() - ctx = context.WithValue(ctx, "version", version.Version) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version")) app := handlers.NewApp(ctx, *config) - handler := gorillahandlers.CombinedLoggingHandler(os.Stdout, app) + handler := newHealthHandler(app) + handler = gorillahandlers.CombinedLoggingHandler(os.Stdout, handler) if config.HTTP.TLS.Certificate == "" { - ctxu.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) + context.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { - ctxu.GetLogger(app).Fatalln(err) + context.GetLogger(app).Fatalln(err) } } else { - ctxu.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) - if err := http.ListenAndServeTLS(config.HTTP.Addr, config.HTTP.TLS.Certificate, config.HTTP.TLS.Key, handler); err != nil { - ctxu.GetLogger(app).Fatalln(err) + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + context.GetLogger(app).Fatalln(err) + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool")) + } + } + + for _, subj := range pool.Subjects() { + context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + context.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) + server := &http.Server{ + Addr: config.HTTP.Addr, + Handler: handler, + TLSConfig: tlsConf, + } + + if err := server.ListenAndServeTLS(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key); err != nil { + context.GetLogger(app).Fatalln(err) } } } diff --git a/pkg/cmd/experimental/registry/registry.go b/pkg/cmd/experimental/registry/registry.go index cee46f845dbd..84004a4aa39b 100644 --- a/pkg/cmd/experimental/registry/registry.go +++ b/pkg/cmd/experimental/registry/registry.go @@ -12,6 +12,7 @@ import ( kclientcmd "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd" cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" "github.com/spf13/cobra" @@ -198,6 +199,16 @@ func NewCmdRegistry(f *clientcmd.Factory, parentName, name string, out io.Writer }, }, Privileged: mountHost, + LivenessProbe: &kapi.Probe{ + InitialDelaySeconds: 3, + TimeoutSeconds: 5, + Handler: kapi.Handler{ + HTTPGet: &kapi.HTTPGetAction{ + Path: "/healthz", + Port: util.NewIntOrStringFromInt(5000), + }, + }, + }, }, }, Volumes: []kapi.Volume{ diff --git a/pkg/cmd/server/bootstrappolicy/policy.go b/pkg/cmd/server/bootstrappolicy/policy.go index 53622a79a93d..c6238a251503 100644 --- a/pkg/cmd/server/bootstrappolicy/policy.go +++ 
b/pkg/cmd/server/bootstrappolicy/policy.go @@ -186,9 +186,17 @@ func GetBootstrapMasterRoles(masterNamespace string) []authorizationapi.Role { Verbs: util.NewStringSet("get", "delete"), Resources: util.NewStringSet("images"), }, + { + Verbs: util.NewStringSet("get"), + Resources: util.NewStringSet("imagestreamimages", "imagestreamtags", "imagestreams"), + }, + { + Verbs: util.NewStringSet("update"), + Resources: util.NewStringSet("imagestreams"), + }, { Verbs: util.NewStringSet("create"), - Resources: util.NewStringSet("imagerepositorymappings"), + Resources: util.NewStringSet("imagerepositorymappings", "imagestreammappings"), }, }, }, diff --git a/pkg/cmd/server/kubernetes/node.go b/pkg/cmd/server/kubernetes/node.go index 234d5a48de2c..28fc8e335fb2 100644 --- a/pkg/cmd/server/kubernetes/node.go +++ b/pkg/cmd/server/kubernetes/node.go @@ -47,6 +47,8 @@ func (ce defaultCommandExecutor) Run(command string, args ...string) error { return c.Run() } +const minimumDockerAPIVersionWithPullByID = "1.18" + // EnsureDocker attempts to connect to the Docker daemon defined by the helper, // and if it is unable to it will print a warning. func (c *NodeConfig) EnsureDocker(docker *dockerutil.Helper) { @@ -59,6 +61,27 @@ func (c *NodeConfig) EnsureDocker(docker *dockerutil.Helper) { c.DockerClient = &dockertools.FakeDockerClient{VersionInfo: dockerclient.Env{"apiversion=1.15"}} } else { glog.Infof("Connecting to Docker at %s", dockerAddr) + + env, err := dockerClient.Version() + if err != nil { + glog.Fatalf("ERROR: Unable to check for Docker server version.\n%v", err) + } + + serverVersionString := env.Get("ApiVersion") + serverVersion, err := dockerclient.NewAPIVersion(serverVersionString) + if err != nil { + glog.Fatalf("ERROR: Unable to determine Docker server version from %q.\n%v", serverVersionString, err) + } + + minimumPullByIDVersion, err := dockerclient.NewAPIVersion(minimumDockerAPIVersionWithPullByID) + if err != nil { + glog.Fatalf("ERROR: Unable to check for Docker server version.\n%v", err) + } + + if serverVersion.LessThan(minimumPullByIDVersion) { + glog.Fatal("ERROR: Docker 1.6 or later (server API version 1.18 or later) required.") + } + c.DockerClient = dockerClient } } diff --git a/pkg/dockerregistry/auth/openshift.go b/pkg/dockerregistry/server/auth.go similarity index 65% rename from pkg/dockerregistry/auth/openshift.go rename to pkg/dockerregistry/server/auth.go index b8912b120655..1c9a73981c58 100644 --- a/pkg/dockerregistry/auth/openshift.go +++ b/pkg/dockerregistry/server/auth.go @@ -1,17 +1,18 @@ -package auth +package server import ( "encoding/base64" "errors" "fmt" "net/http" + "os" "strings" log "github.com/Sirupsen/logrus" ctxu "github.com/docker/distribution/context" registryauth "github.com/docker/distribution/registry/auth" authorizationapi "github.com/openshift/origin/pkg/authorization/api" - "github.com/openshift/origin/pkg/dockerregistry" + "github.com/openshift/origin/pkg/client" "golang.org/x/net/context" ) @@ -21,15 +22,15 @@ func init() { type contextKey int -var bearerTokenKey contextKey = 0 +var userClientKey contextKey = 0 -func WithBearerToken(parent context.Context, bearerToken string) context.Context { - return context.WithValue(parent, bearerTokenKey, bearerToken) +func WithUserClient(parent context.Context, userClient *client.Client) context.Context { + return context.WithValue(parent, userClientKey, userClient) } -func BearerTokenFrom(ctx context.Context) (string, bool) { - bearerToken, ok := ctx.Value(bearerTokenKey).(string) - return bearerToken, ok 
+func UserClientFrom(ctx context.Context) (*client.Client, bool) { + userClient, ok := ctx.Value(userClientKey).(*client.Client) + return userClient, ok } type AccessController struct { @@ -55,7 +56,7 @@ var ( ) func newAccessController(options map[string]interface{}) (registryauth.AccessController, error) { - fmt.Println("Using OpenShift Auth handler") + log.Info("Using OpenShift Auth handler") realm, ok := options["realm"].(string) if !ok { // Default to openshift if not present @@ -85,40 +86,58 @@ func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Authorized handles checking whether the given request is authorized // for actions on resources allowed by openshift. func (ac *AccessController) Authorized(ctx context.Context, accessRecords ...registryauth.Access) (context.Context, error) { - req, err := ctxu.GetRequest(ctx) - if err != nil { - return nil, err - } + var ( + client *client.Client + err error + ) + challenge := &authChallenge{realm: ac.realm} - authParts := strings.SplitN(req.Header.Get("Authorization"), " ", 2) - if len(authParts) != 2 || strings.ToLower(authParts[0]) != "basic" { - challenge.err = ErrTokenRequired - return nil, challenge - } - basicToken := authParts[1] + if os.Getenv("DISABLE_USER_AUTH") == "true" { + client, err = NewRegistryOpenShiftClient() + if err != nil { + return nil, err + } + } else { + req, err := ctxu.GetRequest(ctx) + if err != nil { + return nil, err + } - payload, err := base64.StdEncoding.DecodeString(basicToken) - if err != nil { - log.Errorf("Basic token decode failed: %s", err) - challenge.err = ErrTokenInvalid - return nil, challenge - } - osAuthParts := strings.SplitN(string(payload), ":", 2) - if len(osAuthParts) != 2 { - challenge.err = ErrOpenShiftTokenRequired - return nil, challenge - } - user := osAuthParts[0] - bearerToken := osAuthParts[1] + authParts := strings.SplitN(req.Header.Get("Authorization"), " ", 2) + if len(authParts) != 2 || strings.ToLower(authParts[0]) != "basic" { + challenge.err = ErrTokenRequired + return nil, challenge + } + basicToken := authParts[1] - // In case of docker login, hits endpoint /v2 - if len(accessRecords) == 0 { - err = VerifyOpenShiftUser(user, bearerToken) + payload, err := base64.StdEncoding.DecodeString(basicToken) if err != nil { - challenge.err = err + log.Errorf("Basic token decode failed: %s", err) + challenge.err = ErrTokenInvalid + return nil, challenge + } + osAuthParts := strings.SplitN(string(payload), ":", 2) + if len(osAuthParts) != 2 { + challenge.err = ErrOpenShiftTokenRequired return nil, challenge } + user := osAuthParts[0] + bearerToken := osAuthParts[1] + + client, err = NewUserOpenShiftClient(bearerToken) + if err != nil { + return nil, err + } + + // In case of docker login, hits endpoint /v2 + if len(accessRecords) == 0 { + err = VerifyOpenShiftUser(user, client) + if err != nil { + challenge.err = err + return nil, challenge + } + } } for _, access := range accessRecords { @@ -137,45 +156,37 @@ func (ac *AccessController) Authorized(ctx context.Context, accessRecords ...reg verb := "" switch access.Action { case "push": - verb = "create" + verb = "update" case "pull": verb = "get" default: - challenge.err = fmt.Errorf("Unkown action: %s", access.Action) + challenge.err = fmt.Errorf("Unknown action: %s", access.Action) return nil, challenge } - err = VerifyOpenShiftAccess(repoParts[0], repoParts[1], verb, bearerToken) + err = VerifyOpenShiftAccess(repoParts[0], repoParts[1], verb, client) if err != nil { challenge.err = err return nil, 
challenge } } - return WithBearerToken(ctx, bearerToken), nil + return WithUserClient(ctx, client), nil } -func VerifyOpenShiftUser(user, bearerToken string) error { - client, err := dockerregistry.NewUserOpenShiftClient(bearerToken) - if err != nil { - return err - } +func VerifyOpenShiftUser(user string, client *client.Client) error { userObj, err := client.Users().Get("~") if err != nil { log.Errorf("Get user failed with error: %s", err) return ErrOpenShiftAccessDenied } - if user != userObj.ObjectMeta.Name { + if user != userObj.Name { log.Errorf("Token valid but user name mismatch") return ErrOpenShiftAccessDenied } return nil } -func VerifyOpenShiftAccess(namespace, imageRepo, verb, bearerToken string) error { - client, err := dockerregistry.NewUserOpenShiftClient(bearerToken) - if err != nil { - return err - } +func VerifyOpenShiftAccess(namespace, imageRepo, verb string, client *client.Client) error { sar := authorizationapi.SubjectAccessReview{ Verb: verb, Resource: "imageStreams", diff --git a/pkg/dockerregistry/auth/openshift_test.go b/pkg/dockerregistry/server/auth_test.go similarity index 97% rename from pkg/dockerregistry/auth/openshift_test.go rename to pkg/dockerregistry/server/auth_test.go index f53acd46c644..758fea3eedc2 100644 --- a/pkg/dockerregistry/auth/openshift_test.go +++ b/pkg/dockerregistry/server/auth_test.go @@ -1,4 +1,4 @@ -package auth +package server import ( "fmt" @@ -40,7 +40,11 @@ func TestVerifyOpenShiftAccess(t *testing.T) { } for _, test := range tests { server := simulateOpenShiftMaster(test.openshiftStatusCode, test.openshiftResponse) - err := VerifyOpenShiftAccess("foo", "bar", "create", "magic bearer token") + client, err := NewUserOpenShiftClient("magic bearer token") + if err != nil { + t.Fatal(err) + } + err = VerifyOpenShiftAccess("foo", "bar", "create", client) if err == nil || test.expectedError == nil { if err != test.expectedError { t.Fatal("VerifyOpenShiftAccess did not get expected error - got %s - expected %s", err, test.expectedError) diff --git a/pkg/dockerregistry/helpers.go b/pkg/dockerregistry/server/openshiftclient.go similarity index 98% rename from pkg/dockerregistry/helpers.go rename to pkg/dockerregistry/server/openshiftclient.go index a6aa30f5bc5b..5e2452aacc46 100644 --- a/pkg/dockerregistry/helpers.go +++ b/pkg/dockerregistry/server/openshiftclient.go @@ -1,4 +1,4 @@ -package dockerregistry +package server import ( "errors" diff --git a/pkg/dockerregistry/middleware/repository/openshift.go b/pkg/dockerregistry/server/repositorymiddleware.go similarity index 79% rename from pkg/dockerregistry/middleware/repository/openshift.go rename to pkg/dockerregistry/server/repositorymiddleware.go index 0fb908a8efb7..314d0ae1eb31 100644 --- a/pkg/dockerregistry/middleware/repository/openshift.go +++ b/pkg/dockerregistry/server/repositorymiddleware.go @@ -1,4 +1,4 @@ -package repository +package server import ( "encoding/json" @@ -17,8 +17,6 @@ import ( repomw "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/libtrust" "github.com/openshift/origin/pkg/client" - "github.com/openshift/origin/pkg/dockerregistry" - "github.com/openshift/origin/pkg/dockerregistry/auth" imageapi "github.com/openshift/origin/pkg/image/api" "golang.org/x/net/context" ) @@ -43,7 +41,7 @@ func newRepository(repo distribution.Repository, options map[string]interface{}) return nil, errors.New("REGISTRY_URL is required") } - registryClient, err := dockerregistry.NewRegistryOpenShiftClient() + registryClient, err := 
NewRegistryOpenShiftClient() if err != nil { return nil, err } @@ -117,44 +115,22 @@ func (r *repository) Get(ctx context.Context, dgst digest.Digest) (*manifest.Sig // Get retrieves the named manifest, if it exists. func (r *repository) GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) { - var image *imageapi.Image - if imageStreamTag, err := r.getImageStreamTag(ctx, tag); err == nil { - image = &imageStreamTag.Image - } else { - // TODO remove when docker 1.6 is out - // Since docker 1.5 doesn't support pull by id, we're simulating pull by id - // against the v2 registry by using a pull spec of the form - // :, so once we verify we got a 404 from - // getImageStreamTag, we construct a digest and attempt to get the - // imageStreamImage using that digest. - if err, ok := err.(*kerrors.StatusError); !ok { - log.Errorf("GetByTag: getImageStreamTag returned error: %s", err) - return nil, err - } else if err.ErrStatus.Code != http.StatusNotFound { - log.Errorf("GetByTag: getImageStreamTag returned non-404: %s", err) - } - - // let's try to get by id - dgst, dgstErr := digest.ParseDigest("sha256:" + tag) - if dgstErr != nil { - log.Errorf("GetByTag: unable to parse digest: %s", dgstErr) - return nil, err - } - imageStreamImage, err := r.getImageStreamImage(ctx, dgst) - if err != nil { - log.Errorf("GetByTag: getImageStreamImage returned error: %s", err) - return nil, err - } - image = &imageStreamImage.Image + imageStreamTag, err := r.getImageStreamTag(ctx, tag) + if err != nil { + log.Errorf("Error getting ImageStreamTag %q: %v", tag, err) + return nil, err } + image := &imageStreamTag.Image - dgst, err := digest.ParseDigest(image.Name) + dgst, err := digest.ParseDigest(imageStreamTag.ImageName) if err != nil { + log.Errorf("Error parsing digest %q: %v", imageStreamTag.ImageName, err) return nil, err } image, err = r.getImage(dgst) if err != nil { + log.Errorf("Error getting image %q: %v", dgst.String(), err) return nil, err } @@ -211,9 +187,9 @@ func (r *repository) Put(ctx context.Context, manifest *manifest.SignedManifest) }, } - client, err := getUserOpenShiftClient(ctx) - if err != nil { - log.Errorf("Error creating user client to auto provision image stream: %s", err) + client, ok := UserClientFrom(ctx) + if !ok { + log.Errorf("Error creating user client to auto provision image stream: OpenShift user client unavailable") return statusErr } @@ -252,9 +228,9 @@ func (r *repository) Delete(ctx context.Context, dgst digest.Digest) error { // getImageStream retrieves the ImageStream for r. func (r *repository) getImageStream(ctx context.Context) (*imageapi.ImageStream, error) { - client, err := getUserOpenShiftClient(ctx) - if err != nil { - return nil, err + client, ok := UserClientFrom(ctx) + if !ok { + return nil, fmt.Errorf("Error retrieving image stream: OpenShift user client unavailable") } return client.ImageStreams(r.namespace).Get(r.name) } @@ -269,9 +245,9 @@ func (r *repository) getImage(dgst digest.Digest) (*imageapi.Image, error) { // getImageStreamTag retrieves the Image with tag `tag` for the ImageStream // associated with r. 
func (r *repository) getImageStreamTag(ctx context.Context, tag string) (*imageapi.ImageStreamTag, error) { - client, err := getUserOpenShiftClient(ctx) - if err != nil { - return nil, err + client, ok := UserClientFrom(ctx) + if !ok { + return nil, fmt.Errorf("Error retrieving image stream tag: OpenShift user client unavailable") } return client.ImageStreamTags(r.namespace).Get(r.name, tag) } @@ -279,9 +255,9 @@ func (r *repository) getImageStreamTag(ctx context.Context, tag string) (*imagea // getImageStreamImage retrieves the Image with digest `dgst` for the ImageStream // associated with r. This ensures the user has access to the image. func (r *repository) getImageStreamImage(ctx context.Context, dgst digest.Digest) (*imageapi.ImageStreamImage, error) { - client, err := getUserOpenShiftClient(ctx) - if err != nil { - return nil, err + client, ok := UserClientFrom(ctx) + if !ok { + return nil, fmt.Errorf("Error retrieving image stream image: OpenShift user client unavailable") } return client.ImageStreamImages(r.namespace).Get(r.name, dgst.String()) } @@ -316,11 +292,3 @@ func (r *repository) manifestFromImage(image *imageapi.Image) (*manifest.SignedM } return &sm, err } - -func getUserOpenShiftClient(ctx context.Context) (*client.Client, error) { - bearerToken, ok := auth.BearerTokenFrom(ctx) - if !ok { - return nil, errors.New("unable to create user OpenShift client: bearer token missing") - } - return dockerregistry.NewUserOpenShiftClient(bearerToken) -} diff --git a/pkg/image/api/helper.go b/pkg/image/api/helper.go index f86ddf8e894a..6ab7dba720b5 100644 --- a/pkg/image/api/helper.go +++ b/pkg/image/api/helper.go @@ -3,7 +3,6 @@ package api import ( "encoding/json" "fmt" - "os" "strings" "github.com/docker/distribution/digest" @@ -100,31 +99,8 @@ func (r DockerImageReference) Minimal() DockerImageReference { return r } -var dockerPullSpecGenerator pullSpecGenerator - // String converts a DockerImageReference to a Docker pull spec. func (r DockerImageReference) String() string { - if dockerPullSpecGenerator == nil { - if len(os.Getenv("OPENSHIFT_REAL_PULL_BY_ID")) > 0 { - dockerPullSpecGenerator = &realByIdPullSpecGenerator{} - } else { - dockerPullSpecGenerator = &simulatedByIdPullSpecGenerator{} - } - } - return dockerPullSpecGenerator.pullSpec(r) -} - -// pullSpecGenerator converts a DockerImageReference to a Docker pull spec. -type pullSpecGenerator interface { - pullSpec(ref DockerImageReference) string -} - -// simulatedByIdPullSpecGenerator simulates pull by ID against a v2 registry -// by generating a pull spec where the "tag" is the hex portion of the -// DockerImageReference's ID. 
-type simulatedByIdPullSpecGenerator struct{} - -func (f *simulatedByIdPullSpecGenerator) pullSpec(r DockerImageReference) string { registry := r.Registry if len(registry) > 0 { registry += "/" @@ -139,11 +115,11 @@ func (f *simulatedByIdPullSpecGenerator) pullSpec(r DockerImageReference) string if len(r.Tag) > 0 { ref = ":" + r.Tag } else if len(r.ID) > 0 { - if d, err := digest.ParseDigest(r.ID); err == nil { - // if it parses as a digest, treat it like a by-id tag without the algorithm - ref = ":" + d.Hex() + if _, err := digest.ParseDigest(r.ID); err == nil { + // if it parses as a digest, it's v2 pull by id + ref = "@" + r.ID } else { - // if it doesn't parse, it's presumably a v1 registry by-id tag + // if it doesn't parse as a digest, it's presumably a v1 registry by-id tag ref = ":" + r.ID } } @@ -151,31 +127,6 @@ func (f *simulatedByIdPullSpecGenerator) pullSpec(r DockerImageReference) string return fmt.Sprintf("%s%s%s%s", registry, r.Namespace, r.Name, ref) } -// realByIdPullSpecGenerator generates real pull by ID pull specs against -// a v2 registry using the @ format. -type realByIdPullSpecGenerator struct{} - -func (*realByIdPullSpecGenerator) pullSpec(r DockerImageReference) string { - registry := r.Registry - if len(registry) > 0 { - registry += "/" - } - - if len(r.Namespace) == 0 { - r.Namespace = DockerDefaultNamespace - } - r.Namespace += "/" - - var ref string - if len(r.Tag) > 0 { - ref = ":" + r.Tag - } else if len(r.ID) > 0 { - ref = "@" + r.ID - } - - return fmt.Sprintf("%s%s%s%s", registry, r.Namespace, r.Name, ref) -} - // ImageWithMetadata returns a copy of image with the DockerImageMetadata filled in // from the raw DockerImageManifest data stored in the image. func ImageWithMetadata(image Image) (*Image, error) { diff --git a/pkg/image/api/helper_test.go b/pkg/image/api/helper_test.go index e3939b55243b..b9658f36f276 100644 --- a/pkg/image/api/helper_test.go +++ b/pkg/image/api/helper_test.go @@ -2,7 +2,6 @@ package api import ( "fmt" - "os" "reflect" "testing" "time" @@ -161,103 +160,12 @@ func TestDockerImageReferenceString(t *testing.T) { { Name: "foo", ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - Expected: "library/foo:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - }, - { - Namespace: "bar", - Name: "foo", - Expected: "bar/foo", - }, - { - Namespace: "bar", - Name: "foo", - Tag: "tag", - Expected: "bar/foo:tag", - }, - { - Namespace: "bar", - Name: "foo", - ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - Expected: "bar/foo:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - }, - { - Registry: "bar", - Namespace: "foo", - Name: "baz", - Expected: "bar/foo/baz", - }, - { - Registry: "bar", - Namespace: "foo", - Name: "baz", - Tag: "tag", - Expected: "bar/foo/baz:tag", - }, - { - Registry: "bar", - Namespace: "foo", - Name: "baz", - ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - Expected: "bar/foo/baz:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - }, - { - Registry: "bar:5000", - Namespace: "foo", - Name: "baz", - Expected: "bar:5000/foo/baz", - }, - { - Registry: "bar:5000", - Namespace: "foo", - Name: "baz", - Tag: "tag", - Expected: "bar:5000/foo/baz:tag", - }, - { - Registry: "bar:5000", - Namespace: "foo", - Name: "baz", - ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - Expected: 
"bar:5000/foo/baz:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - }, - } - - for i, testCase := range testCases { - ref := DockerImageReference{ - Registry: testCase.Registry, - Namespace: testCase.Namespace, - Name: testCase.Name, - Tag: testCase.Tag, - ID: testCase.ID, - } - actual := ref.String() - if e, a := testCase.Expected, actual; e != a { - t.Errorf("%d: expected %q, got %q", i, e, a) - } - } -} - -func TestDockerImageReferenceStringWithRealPullByID(t *testing.T) { - os.Setenv("OPENSHIFT_REAL_PULL_BY_ID", "1") - dockerPullSpecGenerator = nil - - testCases := []struct { - Registry, Namespace, Name, Tag, ID string - Expected string - }{ - { - Name: "foo", - Expected: "library/foo", - }, - { - Name: "foo", - Tag: "tag", - Expected: "library/foo:tag", + Expected: "library/foo@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", }, { Name: "foo", - ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", - Expected: "library/foo@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + ID: "3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Expected: "library/foo:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", }, { Namespace: "bar", diff --git a/pkg/image/api/types.go b/pkg/image/api/types.go index eae5ff0eff14..93582c37704d 100644 --- a/pkg/image/api/types.go +++ b/pkg/image/api/types.go @@ -169,7 +169,8 @@ type ImageRepositoryTag struct { // ImageStreamTag exists to allow calls to `osc get imageStreamTag ...` to function. type ImageStreamTag struct { - Image `json:",inline"` + Image `json:",inline"` + ImageName string } // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use. diff --git a/pkg/image/api/v1beta1/types.go b/pkg/image/api/v1beta1/types.go index 7f398bce29a7..ea1e1482343c 100644 --- a/pkg/image/api/v1beta1/types.go +++ b/pkg/image/api/v1beta1/types.go @@ -171,7 +171,8 @@ type ImageRepositoryTag struct { // ImageStreamTag exists to allow calls to `osc get imageStreamTag ...` to function. type ImageStreamTag struct { - Image `json:",inline"` + Image `json:",inline"` + ImageName string `json:"imageName"` } // ImageStreamImage exists to allow calls to `osc get imageStreamImage ...` to function. diff --git a/pkg/image/api/v1beta3/types.go b/pkg/image/api/v1beta3/types.go index 230ec85e5e6b..1442c32e7951 100644 --- a/pkg/image/api/v1beta3/types.go +++ b/pkg/image/api/v1beta3/types.go @@ -113,6 +113,7 @@ type ImageRepositoryTag struct { // ImageStreamTag exists to allow calls to `osc get imageStreamTag ...` to function. type ImageStreamTag struct { Image + ImageName string `json:"imageName"` } // ImageStreamImage exists to allow calls to `osc get imageStreamImage ...` to function. 
diff --git a/pkg/image/registry/imagestream/strategy_test.go b/pkg/image/registry/imagestream/strategy_test.go index c51cffd63ce3..f7030ba7920d 100644 --- a/pkg/image/registry/imagestream/strategy_test.go +++ b/pkg/image/registry/imagestream/strategy_test.go @@ -356,8 +356,6 @@ func TestTagsChanged(t *testing.T) { }}, "t2": {Items: []api.TagEvent{ { - //TODO use the line below when we're on Docker 1.6 with true pull by digest - //DockerImageReference: "registry:5000/ns/stream@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", DockerImageReference: "registry:5000/ns/stream@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, }}, @@ -510,9 +508,7 @@ func TestTagsChanged(t *testing.T) { "t1": { Items: []api.TagEvent{ { - //TODO use the line below when we're on Docker 1.6 with true pull by digest - //DockerImageReference: "registry:5000/ns/stream@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - DockerImageReference: "registry:5000/ns/stream:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + DockerImageReference: "registry:5000/ns/stream@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Image: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, }, diff --git a/pkg/image/registry/imagestreamtag/rest.go b/pkg/image/registry/imagestreamtag/rest.go index 369362102d8e..4187532345bd 100644 --- a/pkg/image/registry/imagestreamtag/rest.go +++ b/pkg/image/registry/imagestreamtag/rest.go @@ -88,7 +88,8 @@ func (r *REST) Get(ctx kapi.Context, id string) (runtime.Object, error) { } ist := api.ImageStreamTag{ - Image: *imageWithMetadata, + Image: *imageWithMetadata, + ImageName: imageWithMetadata.Name, } ist.Namespace = kapi.NamespaceValue(ctx) ist.Name = id diff --git a/test/integration/v2_docker_registry_test.go b/test/integration/v2_docker_registry_test.go index e036dbe81090..cfb1df6f1255 100644 --- a/test/integration/v2_docker_registry_test.go +++ b/test/integration/v2_docker_registry_test.go @@ -151,7 +151,8 @@ middleware: t.Fatalf("expected latest, got %q", tags[0]) } - url := fmt.Sprintf("http://127.0.0.1:5000/v2/%s/%s/manifests/%s", testutil.Namespace(), stream.Name, dgst.String()) + // test get by tag + url := fmt.Sprintf("http://127.0.0.1:5000/v2/%s/%s/manifests/%s", testutil.Namespace(), stream.Name, imageapi.DefaultImageTag) req, err := http.NewRequest("GET", url, nil) if err != nil { t.Fatalf("error creating request: %v", err) @@ -177,6 +178,32 @@ middleware: t.Fatalf("unexpected manifest tag: %s", retrievedManifest.Tag) } + // test get by digest + url = fmt.Sprintf("http://127.0.0.1:5000/v2/%s/%s/manifests/%s", testutil.Namespace(), stream.Name, dgst.String()) + req, err = http.NewRequest("GET", url, nil) + if err != nil { + t.Fatalf("error creating request: %v", err) + } + req.SetBasicAuth(user, token) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("error retrieving manifest from registry: %s", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status code: %d", resp.StatusCode) + } + body, err = ioutil.ReadAll(resp.Body) + if err := json.Unmarshal(body, &retrievedManifest); err != nil { + t.Fatalf("error unmarshaling retrieved manifest") + } + if retrievedManifest.Name != fmt.Sprintf("%s/%s", testutil.Namespace(), stream.Name) { + t.Fatalf("unexpected manifest name: %s", retrievedManifest.Name) + } + if retrievedManifest.Tag != imageapi.DefaultImageTag { + t.Fatalf("unexpected 
manifest tag: %s", retrievedManifest.Tag) + } + image, err := adminClient.ImageStreamImages(testutil.Namespace()).Get(stream.Name, dgst.String()) if err != nil { t.Fatalf("error getting imageStreamImage: %s", err)